get:
Show a patch.

patch:
Partially update a patch.

put:
Fully update a patch.

GET /api/patches/79758/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 79758,
    "url": "http://patches.dpdk.org/api/patches/79758/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/1601984948-313027-4-git-send-email-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<1601984948-313027-4-git-send-email-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/1601984948-313027-4-git-send-email-suanmingm@nvidia.com",
    "date": "2020-10-06T11:48:46",
    "name": "[03/25] net/mlx5: reuse flow Id as hairpin Id",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "e3830ec0ea2902d04936ff4f0b1edcd0b407a725",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/1601984948-313027-4-git-send-email-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 12718,
            "url": "http://patches.dpdk.org/api/series/12718/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=12718",
            "date": "2020-10-06T11:48:45",
            "name": "net/mlx5: support multiple-thread flow operations",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12718/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/79758/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/79758/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E7F81A04BB;\n\tTue,  6 Oct 2020 13:50:43 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 9C2581B3D9;\n\tTue,  6 Oct 2020 13:49:27 +0200 (CEST)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id A134F4C98\n for <dev@dpdk.org>; Tue,  6 Oct 2020 13:49:24 +0200 (CEST)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n suanmingm@nvidia.com) with SMTP; 6 Oct 2020 14:49:19 +0300",
            "from nvidia.com (mtbc-r640-04.mtbc.labs.mlnx [10.75.70.9])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 096BnC0N028553;\n Tue, 6 Oct 2020 14:49:18 +0300"
        ],
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "viacheslavo@nvidia.com, matan@nvidia.com",
        "Cc": "rasland@nvidia.com, dev@dpdk.org, Xueming Li <xuemingl@nvidia.com>",
        "Date": "Tue,  6 Oct 2020 19:48:46 +0800",
        "Message-Id": "<1601984948-313027-4-git-send-email-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 1.8.3.1",
        "In-Reply-To": "<1601984948-313027-1-git-send-email-suanmingm@nvidia.com>",
        "References": "<1601984948-313027-1-git-send-email-suanmingm@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH 03/25] net/mlx5: reuse flow Id as hairpin Id",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Xueming Li <xuemingl@nvidia.com>\n\nHairpin flow matching required a unique flow ID for matching.\nThis patch reuses flow ID as hairpin flow ID, this will save some code\nto generate a separate hairpin ID, also saves flow memory by removing\nhairpin ID.\n\nSigned-off-by: Xueming Li <xuemingl@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c      | 11 -----------\n drivers/net/mlx5/mlx5.h      |  1 -\n drivers/net/mlx5/mlx5_flow.c | 32 ++++++++++----------------------\n drivers/net/mlx5/mlx5_flow.h |  5 +----\n 4 files changed, 11 insertions(+), 38 deletions(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex 16719e6..6c5c04d 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -952,13 +952,6 @@ struct mlx5_dev_ctx_shared *\n \t\tMLX5_ASSERT(sh->devx_rx_uar);\n \t\tMLX5_ASSERT(mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar));\n \t}\n-\tsh->flow_id_pool = mlx5_flow_id_pool_alloc\n-\t\t\t\t\t((1 << HAIRPIN_FLOW_ID_BITS) - 1);\n-\tif (!sh->flow_id_pool) {\n-\t\tDRV_LOG(ERR, \"can't create flow id pool\");\n-\t\terr = ENOMEM;\n-\t\tgoto error;\n-\t}\n #ifndef RTE_ARCH_64\n \t/* Initialize UAR access locks for 32bit implementations. */\n \trte_spinlock_init(&sh->uar_lock_cq);\n@@ -1020,8 +1013,6 @@ struct mlx5_dev_ctx_shared *\n \t\tclaim_zero(mlx5_glue->dealloc_pd(sh->pd));\n \tif (sh->ctx)\n \t\tclaim_zero(mlx5_glue->close_device(sh->ctx));\n-\tif (sh->flow_id_pool)\n-\t\tmlx5_flow_id_pool_release(sh->flow_id_pool);\n \tmlx5_free(sh);\n \tMLX5_ASSERT(err > 0);\n \trte_errno = err;\n@@ -1092,8 +1083,6 @@ struct mlx5_dev_ctx_shared *\n \t\tmlx5_glue->devx_free_uar(sh->devx_rx_uar);\n \tif (sh->ctx)\n \t\tclaim_zero(mlx5_glue->close_device(sh->ctx));\n-\tif (sh->flow_id_pool)\n-\t\tmlx5_flow_id_pool_release(sh->flow_id_pool);\n \tpthread_mutex_destroy(&sh->txpp.mutex);\n \tmlx5_free(sh);\n \treturn;\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex 0080ac8..a3ec994 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -652,7 +652,6 @@ struct mlx5_dev_ctx_shared {\n \tvoid *devx_comp; /* DEVX async comp obj. */\n \tstruct mlx5_devx_obj *tis; /* TIS object. */\n \tstruct mlx5_devx_obj *td; /* Transport domain. */\n-\tstruct mlx5_flow_id_pool *flow_id_pool; /* Flow ID pool. */\n \tvoid *tx_uar; /* Tx/packet pacing shared UAR. */\n \tstruct mlx5_flex_parser_profiles fp[MLX5_FLEX_PARSER_MAX];\n \t/* Flex parser profiles information. 
*/\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex eeee546..f0a6a57 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -3426,9 +3426,8 @@ struct mlx5_flow_tunnel_info {\n \t\t   struct rte_flow_action actions_rx[],\n \t\t   struct rte_flow_action actions_tx[],\n \t\t   struct rte_flow_item pattern_tx[],\n-\t\t   uint32_t *flow_id)\n+\t\t   uint32_t flow_id)\n {\n-\tstruct mlx5_priv *priv = dev->data->dev_private;\n \tconst struct rte_flow_action_raw_encap *raw_encap;\n \tconst struct rte_flow_action_raw_decap *raw_decap;\n \tstruct mlx5_rte_flow_action_set_tag *set_tag;\n@@ -3438,7 +3437,6 @@ struct mlx5_flow_tunnel_info {\n \tchar *addr;\n \tint encap = 0;\n \n-\tmlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);\n \tfor (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {\n \t\tswitch (actions->type) {\n \t\tcase RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:\n@@ -3507,7 +3505,7 @@ struct mlx5_flow_tunnel_info {\n \tset_tag = (void *)actions_rx;\n \tset_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);\n \tMLX5_ASSERT(set_tag->id > REG_NON);\n-\tset_tag->data = *flow_id;\n+\tset_tag->data = flow_id;\n \ttag_action->conf = set_tag;\n \t/* Create Tx item list. 
*/\n \trte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));\n@@ -3516,7 +3514,7 @@ struct mlx5_flow_tunnel_info {\n \titem->type = (enum rte_flow_item_type)\n \t\t     MLX5_RTE_FLOW_ITEM_TYPE_TAG;\n \ttag_item = (void *)addr;\n-\ttag_item->data = *flow_id;\n+\ttag_item->data = flow_id;\n \ttag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);\n \tMLX5_ASSERT(set_tag->id > REG_NON);\n \titem->spec = tag_item;\n@@ -4360,7 +4358,6 @@ struct mlx5_flow_tunnel_info {\n \tuint32_t i;\n \tuint32_t idx = 0;\n \tint hairpin_flow;\n-\tuint32_t hairpin_id = 0;\n \tstruct rte_flow_attr attr_tx = { .priority = 0 };\n \tstruct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();\n \tint ret;\n@@ -4372,24 +4369,22 @@ struct mlx5_flow_tunnel_info {\n \t\t\t\texternal, hairpin_flow, error);\n \tif (ret < 0)\n \t\treturn 0;\n+\tflow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);\n+\tif (!flow) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn 0;\n+\t}\n \tif (hairpin_flow > 0) {\n \t\tif (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {\n \t\t\trte_errno = EINVAL;\n-\t\t\treturn 0;\n+\t\t\tgoto error;\n \t\t}\n \t\tflow_hairpin_split(dev, actions, actions_rx.actions,\n \t\t\t\t   actions_hairpin_tx.actions, items_tx.items,\n-\t\t\t\t   &hairpin_id);\n+\t\t\t\t   idx);\n \t\tp_actions_rx = actions_rx.actions;\n \t}\n-\tflow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);\n-\tif (!flow) {\n-\t\trte_errno = ENOMEM;\n-\t\tgoto error_before_flow;\n-\t}\n \tflow->drv_type = flow_get_drv_type(dev, attr);\n-\tif (hairpin_id != 0)\n-\t\tflow->hairpin_flow_id = hairpin_id;\n \tMLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&\n \t\t    flow->drv_type < MLX5_FLOW_TYPE_MAX);\n \tmemset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));\n@@ -4517,11 +4512,7 @@ struct mlx5_flow_tunnel_info {\n \tflow_drv_destroy(dev, flow);\n \tmlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);\n \trte_errno = ret; /* Restore rte_errno. 
*/\n-error_before_flow:\n \tret = rte_errno;\n-\tif (hairpin_id)\n-\t\tmlx5_flow_id_release(priv->sh->flow_id_pool,\n-\t\t\t\t     hairpin_id);\n \trte_errno = ret;\n \twks->flow_idx = wks->flow_nested_idx;\n \tif (wks->flow_nested_idx)\n@@ -4662,9 +4653,6 @@ struct rte_flow *\n \t */\n \tif (dev->data->dev_started)\n \t\tflow_rxq_flags_trim(dev, flow);\n-\tif (flow->hairpin_flow_id)\n-\t\tmlx5_flow_id_release(priv->sh->flow_id_pool,\n-\t\t\t\t     flow->hairpin_flow_id);\n \tflow_drv_destroy(dev, flow);\n \tif (list)\n \t\tILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex 2685481..4a89524 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -841,8 +841,6 @@ struct mlx5_fdir_flow {\n \tuint32_t rix_flow; /* Index to flow. */\n };\n \n-#define HAIRPIN_FLOW_ID_BITS 28\n-\n /* Flow structure. */\n struct rte_flow {\n \tILIST_ENTRY(uint32_t)next; /**< Index to the next flow structure. */\n@@ -850,13 +848,12 @@ struct rte_flow {\n \t/**< Device flow handles that are part of the flow. */\n \tuint32_t drv_type:2; /**< Driver type. */\n \tuint32_t fdir:1; /**< Identifier of associated FDIR if any. */\n-\tuint32_t hairpin_flow_id:HAIRPIN_FLOW_ID_BITS;\n \t/**< The flow id used for hairpin. */\n \tuint32_t copy_applied:1; /**< The MARK copy Flow os applied. */\n+\tuint32_t meter:16; /**< Holds flow meter id. */\n \tuint32_t rix_mreg_copy;\n \t/**< Index to metadata register copy table resource. */\n \tuint32_t counter; /**< Holds flow counter. */\n-\tuint16_t meter; /**< Holds flow meter id. */\n } __rte_packed;\n \n /* Thread specific flow workspace intermediate data. */\n",
    "prefixes": [
        "03/25"
    ]
}