get:
Show a patch.

patch:
Partially update a patch (only the fields supplied in the request are changed).

put:
Update a patch (the full set of writable fields is replaced).
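
A GET on this endpoint returns the JSON document shown below. As a minimal sketch of driving it from Python with the requests library — the Token authorization header and the writable "state" field are assumptions based on typical Patchwork deployments, not confirmed by this page, and updating a patch requires maintainer rights on the project:

import requests

BASE = "https://patches.dpdk.org/api/patches"

# Fetch the patch shown below as JSON.
resp = requests.get(f"{BASE}/94125/", headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()
print(patch["name"], patch["state"])

# Partially update the patch (PATCH). The Token scheme and the writable
# "state" field are assumptions for a typical Patchwork server.
token = "YOUR_API_TOKEN"  # hypothetical placeholder
resp = requests.patch(
    f"{BASE}/94125/",
    headers={"Authorization": f"Token {token}",
             "Accept": "application/json"},
    json={"state": "accepted"},
)
resp.raise_for_status()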

GET /api/patches/94125/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 94125,
    "url": "https://patches.dpdk.org/api/patches/94125/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20210613000652.28191-32-ajit.khaparde@broadcom.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210613000652.28191-32-ajit.khaparde@broadcom.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210613000652.28191-32-ajit.khaparde@broadcom.com",
    "date": "2021-06-13T00:06:25",
    "name": "[v2,31/58] net/bnxt: modify VXLAN decap for multichannel mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "2114d0a69968712598cb401dfc467963119f958c",
    "submitter": {
        "id": 501,
        "url": "https://patches.dpdk.org/api/people/501/?format=api",
        "name": "Ajit Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "delegate": {
        "id": 1766,
        "url": "https://patches.dpdk.org/api/users/1766/?format=api",
        "username": "ajitkhaparde",
        "first_name": "Ajit",
        "last_name": "Khaparde",
        "email": "ajit.khaparde@broadcom.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20210613000652.28191-32-ajit.khaparde@broadcom.com/mbox/",
    "series": [
        {
            "id": 17305,
            "url": "https://patches.dpdk.org/api/series/17305/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=17305",
            "date": "2021-06-13T00:05:54",
            "name": "enhancements to host based flow table management",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/17305/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/94125/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/94125/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 49085A0C41;\n\tSun, 13 Jun 2021 02:11:36 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 51F07411E2;\n\tSun, 13 Jun 2021 02:07:51 +0200 (CEST)",
            "from mail-pf1-f169.google.com (mail-pf1-f169.google.com\n [209.85.210.169])\n by mails.dpdk.org (Postfix) with ESMTP id 12493411C5\n for <dev@dpdk.org>; Sun, 13 Jun 2021 02:07:48 +0200 (CEST)",
            "by mail-pf1-f169.google.com with SMTP id y15so7615125pfl.4\n for <dev@dpdk.org>; Sat, 12 Jun 2021 17:07:47 -0700 (PDT)",
            "from localhost.localdomain ([192.19.223.252])\n by smtp.gmail.com with ESMTPSA id gg22sm12774609pjb.17.2021.06.12.17.07.45\n (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128);\n Sat, 12 Jun 2021 17:07:45 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=broadcom.com;\n s=google;\n h=from:to:cc:subject:date:message-id:in-reply-to:references\n :mime-version; bh=B5dU62YcrLO/2a9f71p2D52BWxij/47wqY1vLidWq98=;\n b=PLkEjEZsp9jbmQNYnANPlcifu0OboAWTvwwvWfwR7eJZ1ajV1jlfw07vEES4b77o3m\n ArkE9vCjtr7ilF2QNaBHqSTq+hDrKGZPGM0JshDS/SGuQsY9P7lBMGaqJF6xew3g1NGd\n IXi0P1YvXSvqibItgWkB2EjUeI/865VOkbqbY=",
        "X-Google-DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed;\n d=1e100.net; s=20161025;\n h=x-gm-message-state:from:to:cc:subject:date:message-id:in-reply-to\n :references:mime-version;\n bh=B5dU62YcrLO/2a9f71p2D52BWxij/47wqY1vLidWq98=;\n b=P0z0AUidwFGoB88UJD1uRk5d47D9BukjO0l2BkFDNPjQTRo9ED29jmOdQ5Q5ffJqsN\n sQzHU1GNY9Qgt5P8HbxdarC+xLRamh2EUVghNrep5oZa7LdDumB0aWoB0NuFa0PRIGDL\n e7N+p2faiH4zLNPQX9Cwu4TDepFnJmBF4dd4HQ3RXlo595HqnXPg1K9S6vNzBaqNBoa9\n VRZ+NI83CfnA8UZi0ImFwRRhNHNII+z7BWWByXV6EXtsM9xRh4qe09l4E2Mp3+xb55dA\n 7kSlD3fp3DMgWdiU+qO/6uFxI7fgVd+t0+oPSBBbys9vnqWktXLB6vGb7EKuarB+TNkP\n d4pw==",
        "X-Gm-Message-State": "AOAM530bBaT0ZNYBmlfms7u0UNkg4dx4lkFwRtm3Ypp5yeUt/2pACA00\n EPNEu5pXeB2nPEuI9g+Sf3bOAor9KEyPgFiGc8aplQGgRckZG7SUz5NeWmR10+lpYWYa1at8b/p\n IDyOxE/s0PWWD59rfLX5au9CF3ZG9mVf7eVU/sGpbWjqEmqiMFft/P3HUHl95wa4=",
        "X-Google-Smtp-Source": "\n ABdhPJym9UZRYpoX7uz12HzggHTFbcEAB0rbKmK5wrFtunYtUQHrNeolunuCD84JEogt5TBUul5PmA==",
        "X-Received": "by 2002:a62:3344:0:b029:28c:6f0f:cb90 with SMTP id\n z65-20020a6233440000b029028c6f0fcb90mr14860464pfz.58.1623542866481;\n Sat, 12 Jun 2021 17:07:46 -0700 (PDT)",
        "From": "Ajit Khaparde <ajit.khaparde@broadcom.com>",
        "To": "dev@dpdk.org",
        "Cc": "Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>,\n Shahaji Bhosle <sbhosle@broadcom.com>",
        "Date": "Sat, 12 Jun 2021 17:06:25 -0700",
        "Message-Id": "<20210613000652.28191-32-ajit.khaparde@broadcom.com>",
        "X-Mailer": "git-send-email 2.21.1 (Apple Git-122.3)",
        "In-Reply-To": "<20210613000652.28191-1-ajit.khaparde@broadcom.com>",
        "References": "<20210530085929.29695-1-venkatkumar.duvvuru@broadcom.com>\n <20210613000652.28191-1-ajit.khaparde@broadcom.com>",
        "MIME-Version": "1.0",
        "Content-Type": "multipart/signed; protocol=\"application/pkcs7-signature\";\n micalg=sha-256; boundary=\"000000000000deff8105c49a87c9\"",
        "X-Content-Filtered-By": "Mailman/MimeDel 2.1.29",
        "Subject": "[dpdk-dev] [PATCH v2 31/58] net/bnxt: modify VXLAN decap for\n multichannel mode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>\n\nThe driver is using physical port id as the index into\nthe tunnel inner flow table. However, this will not work in case\nof multichannel mode where multiple physical functions are going\nto share the same physical port id.\n\nWhen tunnel inner flow offload request comes before tunnel\nouter flow offload request, the driver caches the tunnel inner flow\ndetails and programs it in the hardware after installing the tunnel\nouter flow in the hardware. If more than one tunnel inner flow arrives\nbefore tunnel outer flow is offloaded, the driver rejects any such\ntunnel inner flow offload requests.\n\nThis patch fixes the above two problems by\n1. Using dpdk port id as the index to store tunnel inner info.\n2. Caching any number of tunnel inner flow offload requests that come\n   before offloading tunnel outer flow offload request\n\nSigned-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>\nReviewed-by: Shahaji Bhosle <sbhosle@broadcom.com>\nReviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>\n---\n drivers/net/bnxt/tf_ulp/bnxt_ulp.c            |   3 +\n drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c       |   3 +-\n drivers/net/bnxt/tf_ulp/ulp_template_struct.h |   1 +\n drivers/net/bnxt/tf_ulp/ulp_tun.c             | 192 ++++++++++++------\n drivers/net/bnxt/tf_ulp/ulp_tun.h             |  30 ++-\n 5 files changed, 150 insertions(+), 79 deletions(-)",
    "diff": "diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\nindex 5c805eef97..59fb530fb1 100644\n--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\n+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c\n@@ -22,6 +22,7 @@\n #include \"ulp_flow_db.h\"\n #include \"ulp_mapper.h\"\n #include \"ulp_port_db.h\"\n+#include \"ulp_tun.h\"\n \n /* Linked list of all TF sessions. */\n STAILQ_HEAD(, bnxt_ulp_session_state) bnxt_ulp_session_list =\n@@ -533,6 +534,8 @@ ulp_ctx_init(struct bnxt *bp,\n \tif (rc)\n \t\tgoto error_deinit;\n \n+\tulp_tun_tbl_init(ulp_data->tun_tbl);\n+\n \tbnxt_ulp_cntxt_tfp_set(bp->ulp_ctx, &bp->tfp);\n \treturn rc;\n \ndiff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c\nindex ddf38ed931..836e94bc60 100644\n--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c\n+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c\n@@ -79,6 +79,7 @@ bnxt_ulp_init_mapper_params(struct bnxt_ulp_mapper_create_parms *mapper_cparms,\n \t\t\t    struct ulp_rte_parser_params *params,\n \t\t\t    enum bnxt_ulp_fdb_type flow_type)\n {\n+\tmemset(mapper_cparms, 0, sizeof(*mapper_cparms));\n \tmapper_cparms->flow_type\t= flow_type;\n \tmapper_cparms->app_priority\t= params->priority;\n \tmapper_cparms->dir_attr\t\t= params->dir_attr;\n@@ -176,7 +177,7 @@ bnxt_ulp_flow_create(struct rte_eth_dev *dev,\n \tparams.fid = fid;\n \tparams.func_id = func_id;\n \tparams.priority = attr->priority;\n-\tparams.port_id = bnxt_get_phy_port_id(dev->data->port_id);\n+\tparams.port_id = dev->data->port_id;\n \t/* Perform the rte flow post process */\n \tret = bnxt_ulp_rte_parser_post_process(&params);\n \tif (ret == BNXT_TF_RC_ERROR)\ndiff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h\nindex ee17390358..b253aefe8d 100644\n--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h\n+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h\n@@ -62,6 +62,7 @@ struct ulp_rte_act_prop {\n \n /* Structure to be used for passing all the parser functions */\n struct ulp_rte_parser_params {\n+\tSTAILQ_ENTRY(ulp_rte_parser_params)  next;\n \tstruct ulp_rte_hdr_bitmap\thdr_bitmap;\n \tstruct ulp_rte_hdr_bitmap\thdr_fp_bit;\n \tstruct ulp_rte_field_bitmap\tfld_bitmap;\ndiff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.c b/drivers/net/bnxt/tf_ulp/ulp_tun.c\nindex 884692947a..6c1ae3ced2 100644\n--- a/drivers/net/bnxt/tf_ulp/ulp_tun.c\n+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.c\n@@ -3,6 +3,8 @@\n  * All rights reserved.\n  */\n \n+#include <sys/queue.h>\n+\n #include <rte_malloc.h>\n \n #include \"ulp_tun.h\"\n@@ -48,19 +50,18 @@ ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,\n \t\tgoto err;\n \n \t/* Store the tunnel dmac in the tunnel cache table and use it while\n-\t * programming tunnel flow F2.\n+\t * programming tunnel inner flow.\n \t */\n \tmemcpy(tun_entry->t_dmac,\n \t       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,\n \t       RTE_ETHER_ADDR_LEN);\n \n-\ttun_entry->valid = true;\n \ttun_entry->tun_flow_info[params->port_id].state =\n \t\t\t\tBNXT_ULP_FLOW_STATE_TUN_O_OFFLD;\n \ttun_entry->outer_tun_flow_id = params->fid;\n \n-\t/* F1 and it's related F2s are correlated based on\n-\t * Tunnel Destination IP Address.\n+\t/* Tunnel outer flow  and it's related inner flows are correlated\n+\t * based on Tunnel Destination IP Address.\n \t */\n \tif (tun_entry->t_dst_ip_valid)\n \t\tgoto done;\n@@ -89,25 +90,27 @@ ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,\n {\n \tstruct 
bnxt_ulp_mapper_create_parms mparms = { 0 };\n \tstruct ulp_per_port_flow_info *flow_info;\n-\tstruct ulp_rte_parser_params *params;\n+\tstruct ulp_rte_parser_params *inner_params;\n \tint ret;\n \n-\t/* F2 doesn't have tunnel dmac, use the tunnel dmac that was\n-\t * stored during F1 programming.\n+\t/* Tunnel inner flow doesn't have tunnel dmac, use the tunnel\n+\t * dmac that was stored during F1 programming.\n \t */\n \tflow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];\n-\tparams = &flow_info->first_inner_tun_params;\n-\tmemcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],\n-\t       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);\n-\tparams->parent_fid = tun_entry->outer_tun_flow_id;\n-\tparams->fid = flow_info->first_tun_i_fid;\n-\n-\tbnxt_ulp_init_mapper_params(&mparms, params,\n-\t\t\t\t    BNXT_ULP_FDB_TYPE_REGULAR);\n-\n-\tret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);\n-\tif (ret)\n-\t\tPMD_DRV_LOG(ERR, \"Failed to create F2 flow.\");\n+\tSTAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {\n+\t\tmemcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],\n+\t\t       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);\n+\t\tinner_params->parent_fid = tun_entry->outer_tun_flow_id;\n+\n+\t\tbnxt_ulp_init_mapper_params(&mparms, inner_params,\n+\t\t\t\t\t    BNXT_ULP_FDB_TYPE_REGULAR);\n+\n+\t\tret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);\n+\t\tif (ret)\n+\t\t\tPMD_DRV_LOG(ERR,\n+\t\t\t\t    \"Failed to create inner tun flow, FID:%u.\",\n+\t\t\t\t    inner_params->fid);\n+\t}\n }\n \n /* This function either install outer tunnel flow & inner tunnel flow\n@@ -118,21 +121,18 @@ ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,\n \t\t\t     struct bnxt_tun_cache_entry *tun_entry,\n \t\t\t     uint16_t tun_idx)\n {\n-\tenum bnxt_ulp_tun_flow_state flow_state;\n \tint ret;\n \n-\tflow_state = tun_entry->tun_flow_info[params->port_id].state;\n \tret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);\n \tif (ret == BNXT_TF_RC_ERROR) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to create outer tunnel flow.\");\n \t\treturn ret;\n \t}\n \n-\t/* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing\n-\t * F1, that means F2 is not deferred. 
Hence, no need to install F2.\n+\t/* Install any cached tunnel inner flows that came before tunnel\n+\t * outer flow.\n \t */\n-\tif (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)\n-\t\tulp_install_inner_tun_flow(tun_entry, params);\n+\tulp_install_inner_tun_flow(tun_entry, params);\n \n \treturn BNXT_TF_RC_FID;\n }\n@@ -141,9 +141,10 @@ ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,\n  * outer tunnel flow request.\n  */\n static int32_t\n-ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,\n+ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,\n \t\t\t\t      struct bnxt_tun_cache_entry *tun_entry)\n {\n+\tstruct ulp_rte_parser_params *inner_tun_params;\n \tstruct ulp_per_port_flow_info *flow_info;\n \tint ret;\n \n@@ -155,19 +156,22 @@ ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,\n \tif (ret != BNXT_TF_RC_SUCCESS)\n \t\treturn BNXT_TF_RC_ERROR;\n \n-\t/* If Tunnel F2 flow comes first then we can't install it in the\n-\t * hardware, because, F2 flow will not have L2 context information.\n-\t * So, just cache the F2 information and program it in the context\n-\t * of F1 flow installation.\n+\t/* If Tunnel inner flow comes first then we can't install it in the\n+\t * hardware, because, Tunnel inner flow will not have L2 context\n+\t * information. So, just cache the Tunnel inner flow information\n+\t * and program it in the context of F1 flow installation.\n \t */\n \tflow_info = &tun_entry->tun_flow_info[params->port_id];\n-\tmemcpy(&flow_info->first_inner_tun_params, params,\n-\t       sizeof(struct ulp_rte_parser_params));\n-\n-\tflow_info->first_tun_i_fid = params->fid;\n-\tflow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;\n+\tinner_tun_params = rte_zmalloc(\"ulp_inner_tun_params\",\n+\t\t\t\t       sizeof(struct ulp_rte_parser_params), 0);\n+\tif (!inner_tun_params)\n+\t\treturn BNXT_TF_RC_ERROR;\n+\tmemcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));\n+\tSTAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,\n+\t\t\t   next);\n+\tflow_info->tun_i_cnt++;\n \n-\t/* F1 and it's related F2s are correlated based on\n+\t/* F1 and it's related Tunnel inner flows are correlated based on\n \t * Tunnel Destination IP Address. 
It could be already set, if\n \t * the inner flow got offloaded first.\n \t */\n@@ -248,8 +252,8 @@ ulp_get_tun_entry(struct ulp_rte_parser_params *params,\n int32_t\n ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)\n {\n-\tbool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;\n-\tbool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;\n+\tbool inner_tun_sig, cache_inner_tun_flow;\n+\tbool outer_tun_reject, outer_tun_flow, inner_tun_flow;\n \tenum bnxt_ulp_tun_flow_state flow_state;\n \tstruct bnxt_tun_cache_entry *tun_entry;\n \tuint32_t l3_tun, l3_tun_decap;\n@@ -267,40 +271,31 @@ ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)\n \tif (rc == BNXT_TF_RC_ERROR)\n \t\treturn rc;\n \n+\tif (params->port_id >= RTE_MAX_ETHPORTS)\n+\t\treturn BNXT_TF_RC_ERROR;\n \tflow_state = tun_entry->tun_flow_info[params->port_id].state;\n \t/* Outer tunnel flow validation */\n-\touter_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);\n-\touter_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);\n+\touter_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);\n \touter_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,\n-\t\t\t\t\t\t      outer_tun_sig);\n+\t\t\t\t\t\t      outer_tun_flow);\n \n \t/* Inner tunnel flow validation */\n \tinner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);\n-\tfirst_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,\n+\tcache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,\n \t\t\t\t\t\t\t inner_tun_sig);\n \tinner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);\n-\tinner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,\n-\t\t\t\t\t\t      inner_tun_sig);\n \n \tif (outer_tun_reject) {\n \t\ttun_entry->outer_tun_rej_cnt++;\n \t\tBNXT_TF_DBG(ERR,\n \t\t\t    \"Tunnel F1 flow rejected, COUNT: %d\\n\",\n \t\t\t    tun_entry->outer_tun_rej_cnt);\n-\t/* Inner tunnel flow is rejected if it comes between first inner\n-\t * tunnel flow and outer flow requests.\n-\t */\n-\t} else if (inner_tun_reject) {\n-\t\ttun_entry->inner_tun_rej_cnt++;\n-\t\tBNXT_TF_DBG(ERR,\n-\t\t\t    \"Tunnel F2 flow rejected, COUNT: %d\\n\",\n-\t\t\t    tun_entry->inner_tun_rej_cnt);\n \t}\n \n-\tif (outer_tun_reject || inner_tun_reject)\n+\tif (outer_tun_reject)\n \t\treturn BNXT_TF_RC_ERROR;\n-\telse if (first_inner_tun_flow)\n-\t\treturn ulp_post_process_first_inner_tun_flow(params, tun_entry);\n+\telse if (cache_inner_tun_flow)\n+\t\treturn ulp_post_process_cache_inner_tun_flow(params, tun_entry);\n \telse if (outer_tun_flow)\n \t\treturn ulp_post_process_outer_tun_flow(params, tun_entry,\n \t\t\t\t\t\t       tun_idx);\n@@ -310,11 +305,86 @@ ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)\n \t\treturn BNXT_TF_RC_NORMAL;\n }\n \n+void\n+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)\n+{\n+\tstruct ulp_per_port_flow_info *flow_info;\n+\tint i, j;\n+\n+\tfor (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {\n+\t\tfor (j = 0; j < RTE_MAX_ETHPORTS; j++) {\n+\t\t\tflow_info = &tun_tbl[i].tun_flow_info[j];\n+\t\t\tSTAILQ_INIT(&flow_info->tun_i_prms_list);\n+\t\t}\n+\t}\n+}\n+\n void\n ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)\n {\n+\tstruct ulp_rte_parser_params *inner_params;\n+\tstruct ulp_per_port_flow_info *flow_info;\n+\tint j;\n+\n+\tfor (j = 0; j < RTE_MAX_ETHPORTS; j++) {\n+\t\tflow_info = &tun_tbl[tun_idx].tun_flow_info[j];\n+\t\tSTAILQ_FOREACH(inner_params,\n+\t\t\t       &flow_info->tun_i_prms_list,\n+\t\t\t       next) 
{\n+\t\t\tSTAILQ_REMOVE(&flow_info->tun_i_prms_list,\n+\t\t\t\t      inner_params,\n+\t\t\t\t      ulp_rte_parser_params, next);\n+\t\t\trte_free(inner_params);\n+\t\t}\n+\t}\n+\n \tmemset(&tun_tbl[tun_idx], 0,\n-\t\tsizeof(struct bnxt_tun_cache_entry));\n+\t\t\tsizeof(struct bnxt_tun_cache_entry));\n+\n+\tfor (j = 0; j < RTE_MAX_ETHPORTS; j++) {\n+\t\tflow_info = &tun_tbl[tun_idx].tun_flow_info[j];\n+\t\tSTAILQ_INIT(&flow_info->tun_i_prms_list);\n+\t}\n+}\n+\n+static bool\n+ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,\n+\t\t\t   struct ulp_per_port_flow_info *flow_info,\n+\t\t\t   uint32_t fid)\n+{\n+\tstruct ulp_rte_parser_params *inner_params;\n+\tint j;\n+\n+\tSTAILQ_FOREACH(inner_params,\n+\t\t       &flow_info->tun_i_prms_list,\n+\t\t       next) {\n+\t\tif (inner_params->fid == fid) {\n+\t\t\tSTAILQ_REMOVE(&flow_info->tun_i_prms_list,\n+\t\t\t\t      inner_params,\n+\t\t\t\t      ulp_rte_parser_params,\n+\t\t\t\t      next);\n+\t\t\trte_free(inner_params);\n+\t\t\tflow_info->tun_i_cnt--;\n+\t\t\t/* When a dpdk application offloads a duplicate\n+\t\t\t * tunnel inner flow on a port that it is not\n+\t\t\t * destined to, there won't be a tunnel outer flow\n+\t\t\t * associated with these duplicate tunnel inner flows.\n+\t\t\t * So, when the last tunnel inner flow ages out, the\n+\t\t\t * driver has to clear the tunnel entry, otherwise\n+\t\t\t * the tunnel entry cannot be reused.\n+\t\t\t */\n+\t\t\tif (!flow_info->tun_i_cnt &&\n+\t\t\t    flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {\n+\t\t\t\tmemset(tun_entry, 0,\n+\t\t\t\t       sizeof(struct bnxt_tun_cache_entry));\n+\t\t\t\tfor (j = 0; j < RTE_MAX_ETHPORTS; j++)\n+\t\t\t\t\tSTAILQ_INIT(&flow_info->tun_i_prms_list);\n+\t\t\t}\n+\t\t\treturn true;\n+\t\t}\n+\t}\n+\n+\treturn false;\n }\n \n /* When a dpdk application offloads the same tunnel inner flow\n@@ -330,12 +400,14 @@ ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)\n \tstruct ulp_per_port_flow_info *flow_info;\n \tint i, j;\n \n-\tfor (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES ; i++) {\n+\tfor (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {\n+\t\tif (!tun_tbl[i].t_dst_ip_valid)\n+\t\t\tcontinue;\n \t\tfor (j = 0; j < RTE_MAX_ETHPORTS; j++) {\n \t\t\tflow_info = &tun_tbl[i].tun_flow_info[j];\n-\t\t\tif (flow_info->first_tun_i_fid == fid &&\n-\t\t\t    flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)\n-\t\t\t\tmemset(flow_info, 0, sizeof(*flow_info));\n+\t\t\tif (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],\n+\t\t\t\t\t\t       flow_info, fid) == true)\n+\t\t\t\treturn;\n \t\t}\n \t}\n }\ndiff --git a/drivers/net/bnxt/tf_ulp/ulp_tun.h b/drivers/net/bnxt/tf_ulp/ulp_tun.h\nindex af6926f0e4..7e31f81f13 100644\n--- a/drivers/net/bnxt/tf_ulp/ulp_tun.h\n+++ b/drivers/net/bnxt/tf_ulp/ulp_tun.h\n@@ -15,7 +15,7 @@\n #include \"ulp_template_db_enum.h\"\n #include \"ulp_template_struct.h\"\n \n-#define\tBNXT_OUTER_TUN_SIGNATURE(l3_tun, params)\t\t\\\n+#define\tBNXT_OUTER_TUN_FLOW(l3_tun, params)\t\t\\\n \t((l3_tun) &&\t\t\t\t\t\\\n \t ULP_BITMAP_ISSET((params)->act_bitmap.bits,\t\\\n \t\t\t  BNXT_ULP_ACTION_BIT_JUMP))\n@@ -24,22 +24,16 @@\n \t !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,\t\t\t\\\n \t\t\t   BNXT_ULP_HDR_BIT_O_ETH))\n \n-#define\tBNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig)\t\\\n+#define\tBNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig)\t\\\n \t((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))\n #define\tBNXT_INNER_TUN_FLOW(state, inner_tun_sig)\t\t\\\n \t((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD 
&& (inner_tun_sig))\n-#define\tBNXT_OUTER_TUN_FLOW(outer_tun_sig)\t\t((outer_tun_sig))\n \n /* It is invalid to get another outer flow offload request\n  * for the same tunnel, while the outer flow is already offloaded.\n  */\n #define\tBNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)\t\\\n \t((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))\n-/* It is invalid to get another inner flow offload request\n- * for the same tunnel, while the outer flow is not yet offloaded.\n- */\n-#define\tBNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig)\t\\\n-\t((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))\n \n #define\tULP_TUN_O_DMAC_HDR_FIELD_INDEX\t1\n #define\tULP_TUN_O_IPV4_DIP_INDEX\t19\n@@ -50,10 +44,10 @@\n  * requests arrive.\n  *\n  * If inner tunnel flow offload request arrives first then the flow\n- * state will change from BNXT_ULP_FLOW_STATE_NORMAL to\n- * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel\n- * flow offload request will change the state of the flow to\n- * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.\n+ * state will remain in BNXT_ULP_FLOW_STATE_NORMAL state.\n+ * The following outer tunnel flow offload request will change the\n+ * state of the flow to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from\n+ * BNXT_ULP_FLOW_STATE_NORMAL.\n  *\n  * If outer tunnel flow offload request arrives first then the flow state\n  * will change from BNXT_ULP_FLOW_STATE_NORMAL to\n@@ -67,17 +61,15 @@\n enum bnxt_ulp_tun_flow_state {\n \tBNXT_ULP_FLOW_STATE_NORMAL = 0,\n \tBNXT_ULP_FLOW_STATE_TUN_O_OFFLD,\n-\tBNXT_ULP_FLOW_STATE_TUN_I_CACHED\n };\n \n struct ulp_per_port_flow_info {\n-\tenum bnxt_ulp_tun_flow_state\tstate;\n-\tuint32_t\t\t\tfirst_tun_i_fid;\n-\tstruct ulp_rte_parser_params\tfirst_inner_tun_params;\n+\tenum bnxt_ulp_tun_flow_state\t\tstate;\n+\tuint32_t\t\t\t\ttun_i_cnt;\n+\tSTAILQ_HEAD(, ulp_rte_parser_params)\ttun_i_prms_list;\n };\n \n struct bnxt_tun_cache_entry {\n-\tbool\t\t\t\tvalid;\n \tbool\t\t\t\tt_dst_ip_valid;\n \tuint8_t\t\t\t\tt_dmac[RTE_ETHER_ADDR_LEN];\n \tunion {\n@@ -86,10 +78,12 @@ struct bnxt_tun_cache_entry {\n \t};\n \tuint32_t\t\t\touter_tun_flow_id;\n \tuint16_t\t\t\touter_tun_rej_cnt;\n-\tuint16_t\t\t\tinner_tun_rej_cnt;\n \tstruct ulp_per_port_flow_info\ttun_flow_info[RTE_MAX_ETHPORTS];\n };\n \n+void\n+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);\n+\n void\n ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);\n \n",
    "prefixes": [
        "v2",
        "31/58"
    ]
}
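
Several fields in this response are themselves URLs for follow-up requests: "mbox" points at the raw patch suitable for git am, while "comments" and "checks" are further JSON collections. A small sketch along those lines, again using the requests library; the "context" and "state" field names on check objects are assumptions, so they are read defensively:

import requests

JSON = {"Accept": "application/json"}

patch = requests.get("https://patches.dpdk.org/api/patches/94125/",
                     headers=JSON).json()

# Save the raw mbox so it can be applied with "git am patch-94125.mbox".
with open("patch-94125.mbox", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# List the individual CI results behind the aggregate "check": "success" field.
for check in requests.get(patch["checks"], headers=JSON).json():
    print(check.get("context"), check.get("state"))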