get:
Show a patch.

patch:
Partially update a patch (only the fields provided are changed).

put:
Fully update a patch (replaces all writable fields).

GET /api/patches/108214/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 108214,
    "url": "http://patches.dpdk.org/api/patches/108214/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20220224031029.14049-5-suanmingm@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220224031029.14049-5-suanmingm@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220224031029.14049-5-suanmingm@nvidia.com",
    "date": "2022-02-24T03:10:19",
    "name": "[v3,04/14] net/mlx5: add port flow configuration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "c560122ddb43be8720abce7da152691a11f075af",
    "submitter": {
        "id": 1887,
        "url": "http://patches.dpdk.org/api/people/1887/?format=api",
        "name": "Suanming Mou",
        "email": "suanmingm@nvidia.com"
    },
    "delegate": {
        "id": 3268,
        "url": "http://patches.dpdk.org/api/users/3268/?format=api",
        "username": "rasland",
        "first_name": "Raslan",
        "last_name": "Darawsheh",
        "email": "rasland@nvidia.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20220224031029.14049-5-suanmingm@nvidia.com/mbox/",
    "series": [
        {
            "id": 21839,
            "url": "http://patches.dpdk.org/api/series/21839/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=21839",
            "date": "2022-02-24T03:10:16",
            "name": "net/mlx5: add hardware steering",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/21839/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/108214/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/108214/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 79074A0353;\n\tThu, 24 Feb 2022 04:12:17 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id CD5EF426D4;\n\tThu, 24 Feb 2022 04:11:24 +0100 (CET)",
            "from NAM12-BN8-obe.outbound.protection.outlook.com\n (mail-bn8nam12on2057.outbound.protection.outlook.com [40.107.237.57])\n by mails.dpdk.org (Postfix) with ESMTP id CE40D411E6\n for <dev@dpdk.org>; Thu, 24 Feb 2022 04:11:19 +0100 (CET)",
            "from DM6PR05CA0055.namprd05.prod.outlook.com (2603:10b6:5:335::24)\n by BL0PR12MB2450.namprd12.prod.outlook.com (2603:10b6:207:4d::32) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4995.16; Thu, 24 Feb\n 2022 03:11:17 +0000",
            "from DM6NAM11FT035.eop-nam11.prod.protection.outlook.com\n (2603:10b6:5:335:cafe::7f) by DM6PR05CA0055.outlook.office365.com\n (2603:10b6:5:335::24) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.5017.23 via Frontend\n Transport; Thu, 24 Feb 2022 03:11:17 +0000",
            "from mail.nvidia.com (12.22.5.235) by\n DM6NAM11FT035.mail.protection.outlook.com (10.13.172.100) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.5017.22 via Frontend Transport; Thu, 24 Feb 2022 03:11:17 +0000",
            "from rnnvmail201.nvidia.com (10.129.68.8) by DRHQMAIL107.nvidia.com\n (10.27.9.16) with Microsoft SMTP Server (TLS) id 15.0.1497.18;\n Thu, 24 Feb 2022 03:10:53 +0000",
            "from nvidia.com (10.126.231.35) by rnnvmail201.nvidia.com\n (10.129.68.8) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.2.986.9; Wed, 23 Feb 2022\n 19:10:50 -0800"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=l3SSBxhwBITh9Kv5iWKt1BvuCbRqhDO29YC0cxoAG1ZbcfgL5PlTmqggdvEo7uDrCAyWb+X3PcMcA2TOyjZjyFvduqSDRe8FPao8TSwGCUqke3T2vR2FXAZRp+ZMYBIQbfW19XS3pEa+msnAF74n8hUVxd22sWIK3EhO4kFFLt4Qf75CsrgCLY/yHN5zLWe/BqTk2nADY2O9f/TJLUWjL2zOp+t/bsE/1RSCRTrtz+mlKtjF2T5iR9UsRGO+T6hx7ZixO2yNNpo2+yK7wl7ai6ZEZBFUPsD+hT5Numt0GjjIFIT61a7B9hSMX6ANN7mymej7EUI0UQH0lmgZrYAAoA==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=Wysg6Bo6K3N4117kW1BkMC1PGkE01UesRceqG8wZxg4=;\n b=Kew/nvGBvI4MpDeSzv9Bk1YA9gqJtx7J4hhvNZwLZ3zvcWaj5meCgVbj047EvFkE0cB+G40k1rNlG+WjqYST/nBk2MAHAzVbhOhqG04QBs0dHHTw5lAJrIKna5mjAUmL3H7okgzcoGUxt/XkKYgdgWPTWMBPs9anznTxpmfQViVK5YvCTVYvqWlDJcFUood7pHq1hSqoNCCSEjMnEJAxAY1rJo72W3zSG0Q9VqPdKWEoA5esvsZGBMArDwjMv6jZo+SNPEElGSeeylya+HBIRZCdBEwsPGamk7FTcLzd7ujwLVxnq8QGhcNIqVEKfUiXQs+rYHDjEEQC/MG6liTY9g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 12.22.5.235) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com; dmarc=pass\n (p=reject sp=reject pct=100) action=none header.from=nvidia.com; dkim=none\n (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=Wysg6Bo6K3N4117kW1BkMC1PGkE01UesRceqG8wZxg4=;\n b=fNWwMkMaEy8r1IIEVFAiL/LK8z9xfasyjgtR2QM+fHOY0DbwXHISHUQQHFfCYxvmfl6yq+bxsdMP/tRU/F/WVFIUVkrTYmPrIITRn5m5A8RLblROf9aiwxWuh9OUv32h5DjFwd0ApD3PT2Om3z1GH+MMiWcVRItexuSqn0FqTRt/1jy4NnjXZQRJoIiuBEaa1oyhwQfyiqs9YeQZ1pi2xAKYcDVLx6ixFoz0gUFL0qtvO9yThkXGJsivmzuBSvNlCfChEnSDGKa75/f0ORXleBbqdPO/Y+rjtO1nSR7eu7gvb01Et/2EtXliCyw8mqYz3CevGX7+36lsckN6/lDJvg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 12.22.5.235)\n smtp.mailfrom=nvidia.com; dkim=none (message not signed)\n header.d=none;dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 12.22.5.235 as permitted sender) receiver=protection.outlook.com;\n client-ip=12.22.5.235; helo=mail.nvidia.com;",
        "From": "Suanming Mou <suanmingm@nvidia.com>",
        "To": "<viacheslavo@nvidia.com>, <matan@nvidia.com>",
        "CC": "<rasland@nvidia.com>, <orika@nvidia.com>, <dev@dpdk.org>",
        "Subject": "[PATCH v3 04/14] net/mlx5: add port flow configuration",
        "Date": "Thu, 24 Feb 2022 05:10:19 +0200",
        "Message-ID": "<20220224031029.14049-5-suanmingm@nvidia.com>",
        "X-Mailer": "git-send-email 2.18.1",
        "In-Reply-To": "<20220224031029.14049-1-suanmingm@nvidia.com>",
        "References": "<20220210162926.20436-1-suanmingm@nvidia.com>\n <20220224031029.14049-1-suanmingm@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[10.126.231.35]",
        "X-ClientProxiedBy": "rnnvmail201.nvidia.com (10.129.68.8) To\n rnnvmail201.nvidia.com (10.129.68.8)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "0c5f9ca1-ca88-46a8-ebf0-08d9f74352c6",
        "X-MS-TrafficTypeDiagnostic": "BL0PR12MB2450:EE_",
        "X-Microsoft-Antispam-PRVS": "\n <BL0PR12MB2450BDA78012680064AFBFFAC13D9@BL0PR12MB2450.namprd12.prod.outlook.com>",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n RCkio6q30XgrcpU9wmBWiD2m1ogefo8qYR2dcFpQxyU1+DYe9l5TG+ThlD7IgiQ4dCX+zD7TkgwRGPJ5xfR97GfWFycqTOYnrgtSG2hNhpTG4yl7ToExyw4/taXpNRa7JP+NI3EmrocUy8rR/q1iB8JjsWxO00FCppfzHE944+PoMCRj+BGB/0WaNfQwntuMgD6zKIDlaKRW71iDpRf23wfSNaALhttAR9TuvvKNZZWbKgZNgcMpgki+SCsXkSO3XnquWABNp21Fo2aEWWGVmPm3KN8SMW6wIXzIgm85CC+ARrbtFqRIvVJy3iwyQsXJO95rqVNclW9jd+7bZkHOZqUNm2gUpnYlwOoZGjpOrUb93purBWjsR8QgNQ/QlIBEMm+qHG73WfnzObLjkL5yIbINahvO2bT8lU62JnNSfT0RieD9KabvBMVKXl98xc+x1VzNWxmcO00xEf+t7gWIFjxnTC7kcAM9Aci1jze+JcNLXo1n+9lw1utiD32dg8W+ylpot8kVNU6rl+tBs4Ajl01WXsjgIIcK2xmtJ4XKwRfROiAIF4kEjLtR0sLFvjf2+N6I2zovt5bYwGlwy0Wna5kGAq89tFXRmj1+P2L62j3ZdtAQRGUfsR54uq5aQir20dsMlCPc2khoCxCoZGqim8MEMl1ccMIeC1xV6wzmKbTWHewiqFaWvJeeUG02VtnyoWWxzaKQXXChQBkL+t2rZA9cvZAkc7by6B/c7f+oPpI=",
        "X-Forefront-Antispam-Report": "CIP:12.22.5.235; CTRY:US; LANG:en; SCL:1; SRV:;\n IPV:CAL; SFV:NSPM; H:mail.nvidia.com; PTR:InfoNoRecords; CAT:NONE;\n SFS:(13230001)(4636009)(46966006)(40470700004)(36840700001)(316002)(70206006)(70586007)(55016003)(6636002)(356005)(4326008)(8676002)(110136005)(36756003)(47076005)(36860700001)(83380400001)(81166007)(54906003)(186003)(336012)(82310400004)(30864003)(508600001)(8936002)(1076003)(5660300002)(16526019)(2616005)(6286002)(426003)(26005)(6666004)(7696005)(2906002)(40460700003)(86362001)(21314003)(36900700001);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "24 Feb 2022 03:11:17.2023 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 0c5f9ca1-ca88-46a8-ebf0-08d9f74352c6",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[12.22.5.235];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n DM6NAM11FT035.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BL0PR12MB2450",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "The hardware steering is backend to support rte_flow_async API in\nmlx5 PMD. The port configuration function creates the queues and\nneeded flow management resources.\n\nThe PMD layer configuration function allocates the queues' context\nand per-queue job descriptor pool. The job descriptor pool size\nis equal to the queue size, and the job descriptors will be popped\nfrom pool with LIFO strategy to convey the flow information during\nflow insertion/destruction. Then, while polling the queued operation\nresult, the flow information will be extracted from the job descriptor\nand the descriptor will be pushed back to the LIFO pool.\n\nThe commit creates the flow port queues and the job descriptor pools.\n\nSigned-off-by: Suanming Mou <suanmingm@nvidia.com>\nAcked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>\n---\n drivers/net/mlx5/mlx5.c         |   3 +\n drivers/net/mlx5/mlx5.h         |  30 +++++-\n drivers/net/mlx5/mlx5_flow.c    |  86 +++++++++++++++++\n drivers/net/mlx5/mlx5_flow.h    |  15 +++\n drivers/net/mlx5/mlx5_flow_hw.c | 159 ++++++++++++++++++++++++++++++++\n 5 files changed, 292 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c\nindex f49d30c05c..0079aa83c1 100644\n--- a/drivers/net/mlx5/mlx5.c\n+++ b/drivers/net/mlx5/mlx5.c\n@@ -1820,6 +1820,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)\n \t/* Free the eCPRI flex parser resource. */\n \tmlx5_flex_parser_ecpri_release(dev);\n \tmlx5_flex_item_port_cleanup(dev);\n+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+\tflow_hw_resource_release(dev);\n+#endif\n \tif (priv->rxq_privs != NULL) {\n \t\t/* XXX race condition if mlx5_rx_burst() is still running. */\n \t\trte_delay_us_sleep(1000);\ndiff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h\nindex b2259fc1fb..f0edf7f559 100644\n--- a/drivers/net/mlx5/mlx5.h\n+++ b/drivers/net/mlx5/mlx5.h\n@@ -33,7 +33,9 @@\n #include \"mlx5_utils.h\"\n #include \"mlx5_os.h\"\n #include \"mlx5_autoconf.h\"\n-\n+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+#include \"mlx5_dr.h\"\n+#endif\n \n #define MLX5_SH(dev) (((struct mlx5_priv *)(dev)->data->dev_private)->sh)\n \n@@ -320,6 +322,26 @@ struct mlx5_lb_ctx {\n \tuint16_t refcnt; /* Reference count for representors. */\n };\n \n+/* HW steering queue job descriptor type. */\n+enum {\n+\tMLX5_HW_Q_JOB_TYPE_CREATE, /* Flow create job type. */\n+\tMLX5_HW_Q_JOB_TYPE_DESTROY, /* Flow destroy job type. */\n+};\n+\n+/* HW steering flow management job descriptor. */\n+struct mlx5_hw_q_job {\n+\tuint32_t type; /* Job type. */\n+\tstruct rte_flow *flow; /* Flow attached to the job. */\n+\tvoid *user_data; /* Job user data. */\n+};\n+\n+/* HW steering job descriptor LIFO pool. */\n+struct mlx5_hw_q {\n+\tuint32_t job_idx; /* Free job index. */\n+\tuint32_t size; /* LIFO size. */\n+\tstruct mlx5_hw_q_job **job; /* LIFO header. 
*/\n+} __rte_cache_aligned;\n+\n #define MLX5_COUNTERS_PER_POOL 512\n #define MLX5_MAX_PENDING_QUERIES 4\n #define MLX5_CNT_CONTAINER_RESIZE 64\n@@ -1479,6 +1501,12 @@ struct mlx5_priv {\n \tstruct mlx5_flex_item flex_item[MLX5_PORT_FLEX_ITEM_NUM];\n \t/* Flex items have been created on the port. */\n \tuint32_t flex_item_map; /* Map of allocated flex item elements. */\n+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n+\tstruct mlx5dr_context *dr_ctx; /**< HW steering DR context. */\n+\tuint32_t nb_queue; /* HW steering queue number. */\n+\t/* HW steering queue polling mechanism job descriptor LIFO. */\n+\tstruct mlx5_hw_q *hw_q;\n+#endif\n };\n \n #define PORT_ID(priv) ((priv)->dev_data->port_id)\ndiff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c\nindex cdb40c0756..554ebc804d 100644\n--- a/drivers/net/mlx5/mlx5_flow.c\n+++ b/drivers/net/mlx5/mlx5_flow.c\n@@ -805,6 +805,17 @@ static int\n mlx5_flow_flex_item_release(struct rte_eth_dev *dev,\n \t\t\t    const struct rte_flow_item_flex_handle *handle,\n \t\t\t    struct rte_flow_error *error);\n+static int\n+mlx5_flow_info_get(struct rte_eth_dev *dev,\n+\t\t   struct rte_flow_port_info *port_info,\n+\t\t   struct rte_flow_queue_info *queue_info,\n+\t\t   struct rte_flow_error *error);\n+static int\n+mlx5_flow_port_configure(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_port_attr *port_attr,\n+\t\t\t uint16_t nb_queue,\n+\t\t\t const struct rte_flow_queue_attr *queue_attr[],\n+\t\t\t struct rte_flow_error *err);\n \n static const struct rte_flow_ops mlx5_flow_ops = {\n \t.validate = mlx5_flow_validate,\n@@ -826,6 +837,8 @@ static const struct rte_flow_ops mlx5_flow_ops = {\n \t.get_restore_info = mlx5_flow_tunnel_get_restore_info,\n \t.flex_item_create = mlx5_flow_flex_item_create,\n \t.flex_item_release = mlx5_flow_flex_item_release,\n+\t.info_get = mlx5_flow_info_get,\n+\t.configure = mlx5_flow_port_configure,\n };\n \n /* Tunnel information. 
*/\n@@ -3429,6 +3442,12 @@ flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)\n \n \tif (type != MLX5_FLOW_TYPE_MAX)\n \t\treturn type;\n+\t/*\n+\t * Currently when dv_flow_en == 2, only HW steering engine is\n+\t * supported. New engines can also be chosen here if ready.\n+\t */\n+\tif (priv->sh->config.dv_flow_en == 2)\n+\t\treturn MLX5_FLOW_TYPE_HW;\n \t/* If no OS specific type - continue with DV/VERBS selection */\n \tif (attr->transfer && priv->sh->config.dv_esw_en)\n \t\ttype = MLX5_FLOW_TYPE_DV;\n@@ -7838,6 +7857,73 @@ mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,\n \treturn -ENOTSUP;\n }\n \n+/**\n+ * Get information about HWS pre-configurable resources.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[out] port_info\n+ *   Pointer to port information.\n+ * @param[out] queue_info\n+ *   Pointer to queue information.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_info_get(struct rte_eth_dev *dev,\n+\t\t   struct rte_flow_port_info *port_info,\n+\t\t   struct rte_flow_queue_info *queue_info,\n+\t\t   struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tif (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"info get with incorrect steering mode\");\n+\tfops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\treturn fops->info_get(dev, port_info, queue_info, error);\n+}\n+\n+/**\n+ * Configure port HWS resources.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] port_attr\n+ *   Port configuration attributes.\n+ * @param[in] nb_queue\n+ *   Number of queue.\n+ * @param[in] queue_attr\n+ *   Array that holds attributes for each flow queue.\n+ * @param[out] error\n+ *   Pointer to 
error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+mlx5_flow_port_configure(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_port_attr *port_attr,\n+\t\t\t uint16_t nb_queue,\n+\t\t\t const struct rte_flow_queue_attr *queue_attr[],\n+\t\t\t struct rte_flow_error *error)\n+{\n+\tconst struct mlx5_flow_driver_ops *fops;\n+\n+\tif (flow_get_drv_type(dev, NULL) != MLX5_FLOW_TYPE_HW)\n+\t\treturn rte_flow_error_set(error, ENOTSUP,\n+\t\t\t\tRTE_FLOW_ERROR_TYPE_UNSPECIFIED,\n+\t\t\t\tNULL,\n+\t\t\t\t\"port configure with incorrect steering mode\");\n+\tfops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);\n+\treturn fops->configure(dev, port_attr, nb_queue, queue_attr, error);\n+}\n+\n /**\n  * Allocate a new memory for the counter values wrapped by all the needed\n  * management.\ndiff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h\nindex b70ef0c1b8..9f0dc4bde7 100644\n--- a/drivers/net/mlx5/mlx5_flow.h\n+++ b/drivers/net/mlx5/mlx5_flow.h\n@@ -1257,6 +1257,17 @@ typedef int (*mlx5_flow_item_update_t)\n \t\t\t const struct rte_flow_item_flex_handle *handle,\n \t\t\t const struct rte_flow_item_flex_conf *conf,\n \t\t\t struct rte_flow_error *error);\n+typedef int (*mlx5_flow_info_get_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t struct rte_flow_port_info *port_info,\n+\t\t\t struct rte_flow_queue_info *queue_info,\n+\t\t\t struct rte_flow_error *error);\n+typedef int (*mlx5_flow_port_configure_t)\n+\t\t\t(struct rte_eth_dev *dev,\n+\t\t\t const struct rte_flow_port_attr *port_attr,\n+\t\t\t uint16_t nb_queue,\n+\t\t\t const struct rte_flow_queue_attr *queue_attr[],\n+\t\t\t struct rte_flow_error *err);\n \n struct mlx5_flow_driver_ops {\n \tmlx5_flow_validate_t validate;\n@@ -1295,6 +1306,8 @@ struct mlx5_flow_driver_ops {\n \tmlx5_flow_item_create_t item_create;\n \tmlx5_flow_item_release_t item_release;\n \tmlx5_flow_item_update_t item_update;\n+\tmlx5_flow_info_get_t 
info_get;\n+\tmlx5_flow_port_configure_t configure;\n };\n \n /* mlx5_flow.c */\n@@ -1762,4 +1775,6 @@ const struct mlx5_flow_tunnel *\n mlx5_get_tof(const struct rte_flow_item *items,\n \t     const struct rte_flow_action *actions,\n \t     enum mlx5_tof_rule_type *rule_type);\n+void\n+flow_hw_resource_release(struct rte_eth_dev *dev);\n #endif /* RTE_PMD_MLX5_FLOW_H_ */\ndiff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c\nindex 729d5914a8..e5b2ae91d8 100644\n--- a/drivers/net/mlx5/mlx5_flow_hw.c\n+++ b/drivers/net/mlx5/mlx5_flow_hw.c\n@@ -4,10 +4,169 @@\n \n #include <rte_flow.h>\n \n+#include <mlx5_malloc.h>\n+#include \"mlx5_defs.h\"\n #include \"mlx5_flow.h\"\n \n #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)\n \n const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops;\n \n+/**\n+ * Get information about HWS pre-configurable resources.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[out] port_info\n+ *   Pointer to port information.\n+ * @param[out] queue_info\n+ *   Pointer to queue information.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_info_get(struct rte_eth_dev *dev __rte_unused,\n+\t\t struct rte_flow_port_info *port_info __rte_unused,\n+\t\t struct rte_flow_queue_info *queue_info __rte_unused,\n+\t\t struct rte_flow_error *error __rte_unused)\n+{\n+\t/* Nothing to be updated currently. */\n+\tmemset(port_info, 0, sizeof(*port_info));\n+\t/* Queue size is unlimited from low-level. 
*/\n+\tqueue_info->max_size = UINT32_MAX;\n+\treturn 0;\n+}\n+\n+/**\n+ * Configure port HWS resources.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ * @param[in] port_attr\n+ *   Port configuration attributes.\n+ * @param[in] nb_queue\n+ *   Number of queue.\n+ * @param[in] queue_attr\n+ *   Array that holds attributes for each flow queue.\n+ * @param[out] error\n+ *   Pointer to error structure.\n+ *\n+ * @return\n+ *   0 on success, a negative errno value otherwise and rte_errno is set.\n+ */\n+static int\n+flow_hw_configure(struct rte_eth_dev *dev,\n+\t\t  const struct rte_flow_port_attr *port_attr,\n+\t\t  uint16_t nb_queue,\n+\t\t  const struct rte_flow_queue_attr *queue_attr[],\n+\t\t  struct rte_flow_error *error)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\tstruct mlx5dr_context *dr_ctx = NULL;\n+\tstruct mlx5dr_context_attr dr_ctx_attr = {0};\n+\tstruct mlx5_hw_q *hw_q;\n+\tstruct mlx5_hw_q_job *job = NULL;\n+\tuint32_t mem_size, i, j;\n+\n+\tif (!port_attr || !nb_queue || !queue_attr) {\n+\t\trte_errno = EINVAL;\n+\t\tgoto err;\n+\t}\n+\t/* In case re-configuring, release existing context at first. */\n+\tif (priv->dr_ctx) {\n+\t\t/* */\n+\t\tfor (i = 0; i < nb_queue; i++) {\n+\t\t\thw_q = &priv->hw_q[i];\n+\t\t\t/* Make sure all queues are empty. */\n+\t\t\tif (hw_q->size != hw_q->job_idx) {\n+\t\t\t\trte_errno = EBUSY;\n+\t\t\t\tgoto err;\n+\t\t\t}\n+\t\t}\n+\t\tflow_hw_resource_release(dev);\n+\t}\n+\t/* Allocate the queue job descriptor LIFO. 
*/\n+\tmem_size = sizeof(priv->hw_q[0]) * nb_queue;\n+\tfor (i = 0; i < nb_queue; i++) {\n+\t\t/*\n+\t\t * Check if the queues' size are all the same as the\n+\t\t * limitation from HWS layer.\n+\t\t */\n+\t\tif (queue_attr[i]->size != queue_attr[0]->size) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\tgoto err;\n+\t\t}\n+\t\tmem_size += (sizeof(struct mlx5_hw_q_job *) +\n+\t\t\t    sizeof(struct mlx5_hw_q_job)) *\n+\t\t\t    queue_attr[0]->size;\n+\t}\n+\tpriv->hw_q = mlx5_malloc(MLX5_MEM_ZERO, mem_size,\n+\t\t\t\t 64, SOCKET_ID_ANY);\n+\tif (!priv->hw_q) {\n+\t\trte_errno = ENOMEM;\n+\t\tgoto err;\n+\t}\n+\tfor (i = 0; i < nb_queue; i++) {\n+\t\tpriv->hw_q[i].job_idx = queue_attr[i]->size;\n+\t\tpriv->hw_q[i].size = queue_attr[i]->size;\n+\t\tif (i == 0)\n+\t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n+\t\t\t\t\t    &priv->hw_q[nb_queue];\n+\t\telse\n+\t\t\tpriv->hw_q[i].job = (struct mlx5_hw_q_job **)\n+\t\t\t\t\t    &job[queue_attr[i - 1]->size];\n+\t\tjob = (struct mlx5_hw_q_job *)\n+\t\t      &priv->hw_q[i].job[queue_attr[i]->size];\n+\t\tfor (j = 0; j < queue_attr[i]->size; j++)\n+\t\t\tpriv->hw_q[i].job[j] = &job[j];\n+\t}\n+\tdr_ctx_attr.pd = priv->sh->cdev->pd;\n+\tdr_ctx_attr.queues = nb_queue;\n+\t/* Queue size should all be the same. Take the first one. */\n+\tdr_ctx_attr.queue_size = queue_attr[0]->size;\n+\tdr_ctx = mlx5dr_context_open(priv->sh->cdev->ctx, &dr_ctx_attr);\n+\t/* rte_errno has been updated by HWS layer. 
*/\n+\tif (!dr_ctx)\n+\t\tgoto err;\n+\tpriv->dr_ctx = dr_ctx;\n+\tpriv->nb_queue = nb_queue;\n+\treturn 0;\n+err:\n+\tif (dr_ctx)\n+\t\tclaim_zero(mlx5dr_context_close(dr_ctx));\n+\tmlx5_free(priv->hw_q);\n+\tpriv->hw_q = NULL;\n+\treturn rte_flow_error_set(error, rte_errno,\n+\t\t\t\t  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,\n+\t\t\t\t  \"fail to configure port\");\n+}\n+\n+/**\n+ * Release HWS resources.\n+ *\n+ * @param[in] dev\n+ *   Pointer to the rte_eth_dev structure.\n+ */\n+void\n+flow_hw_resource_release(struct rte_eth_dev *dev)\n+{\n+\tstruct mlx5_priv *priv = dev->data->dev_private;\n+\n+\tif (!priv->dr_ctx)\n+\t\treturn;\n+\tmlx5_free(priv->hw_q);\n+\tpriv->hw_q = NULL;\n+\tclaim_zero(mlx5dr_context_close(priv->dr_ctx));\n+\tpriv->dr_ctx = NULL;\n+\tpriv->nb_queue = 0;\n+}\n+\n+const struct mlx5_flow_driver_ops mlx5_flow_hw_drv_ops = {\n+\t.info_get = flow_hw_info_get,\n+\t.configure = flow_hw_configure,\n+};\n+\n #endif\n",
    "prefixes": [
        "v3",
        "04/14"
    ]
}