get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Update a patch (full replacement of writable fields).

GET /api/patches/103978/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103978,
    "url": "http://patches.dpdk.org/api/patches/103978/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211108185805.3887-9-eagostini@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211108185805.3887-9-eagostini@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211108185805.3887-9-eagostini@nvidia.com",
    "date": "2021-11-08T18:58:04",
    "name": "[v5,8/9] gpudev: add communication list",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "a649ff8e6203df3498800c347d6963d206bbe1eb",
    "submitter": {
        "id": 1571,
        "url": "http://patches.dpdk.org/api/people/1571/?format=api",
        "name": "Elena Agostini",
        "email": "eagostini@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211108185805.3887-9-eagostini@nvidia.com/mbox/",
    "series": [
        {
            "id": 20381,
            "url": "http://patches.dpdk.org/api/series/20381/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=20381",
            "date": "2021-11-08T18:57:56",
            "name": "GPU library",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/20381/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/103978/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/103978/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id D90F3A0C4D;\n\tMon,  8 Nov 2021 11:48:08 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8838C4115C;\n\tMon,  8 Nov 2021 11:47:15 +0100 (CET)",
            "from NAM10-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam10on2073.outbound.protection.outlook.com [40.107.93.73])\n by mails.dpdk.org (Postfix) with ESMTP id AFEA541135\n for <dev@dpdk.org>; Mon,  8 Nov 2021 11:47:11 +0100 (CET)",
            "from MW2PR16CA0037.namprd16.prod.outlook.com (2603:10b6:907:1::14)\n by BY5PR12MB4243.namprd12.prod.outlook.com (2603:10b6:a03:20f::15) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.10; Mon, 8 Nov\n 2021 10:47:10 +0000",
            "from CO1NAM11FT064.eop-nam11.prod.protection.outlook.com\n (2603:10b6:907:1:cafe::4f) by MW2PR16CA0037.outlook.office365.com\n (2603:10b6:907:1::14) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11 via Frontend\n Transport; Mon, 8 Nov 2021 10:47:10 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT064.mail.protection.outlook.com (10.13.175.77) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Mon, 8 Nov 2021 10:47:09 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Mon, 8 Nov\n 2021 10:47:02 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=cwSnEzZthhwIAjpCJ6d7c3Kw3tjZzoMjLP7ILRvrKp0nNPgGPfw7XFUwCtt/PNXrwVk31amL51XOWPmwhfwl/T6UuQ29Yne4E6ojJaCr2YXlrsW+SOsylbHWtl5vIXt9ZeRjKSy3Ip+IF3ml0M7AW4AjkFQHEb6f8cp/AWvYaoBM2mKiDOSwHrZyYLcy192sY9ioDII2iLJJFdPl9TBV26uq/SYxIDHwHA1/E5VO9qzazD1qQP0OslHYe0Cnbbop4bx8On8vfjgBwftC7WV1M3u8MLD+Eam/n8/m6q6ivtEwHDKbn7NRJzKTFGDPToXzEB0ivHfLkxX6Y5/nBZUtKg==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=VX5zpehkghcafleHeX83G1br/+w3dEjZdifrsXb2y5s=;\n b=CncCEOXRlBRaDpgCxymV9bK6KCNmRLX55L0LfJJnLVMlW0DFlY9PHSjzJ3Z2IvyDTvHvBxbRT+dCKzX2RKh3k7XB/VLVhjIMkDkaiLpgriDUHrGfacrxjUEuNil+aPfFneN8oZ/8i6LxRSXAf/Dd4n4xyCirZ/mYU6Ld5V101R3ZErfVNofB6HCVWc9eATSH6TdrL8kegMZfiCcN//2JmOahA9X9DdGaxZaxX+ljgI3G3y5eGa7arOzno8S/quZgLGUAgsppew9SnJ9CrDRhdaDVVHPaGvR8i6LYT8Wwv1UGf+BKnuh6jKpPUSKNidFLYf75VY02nk2dbXKtDMRycQ==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=VX5zpehkghcafleHeX83G1br/+w3dEjZdifrsXb2y5s=;\n b=I3r1mtYYFa+zWGlbrK/teMxsaT8N4KdgQH38VPsO9YOnFWRLoR9/NbGCFgau8XRyC69pj9qJ6QR7f+Q1cWKUnL2Utpi9MBE5ZgHtRJK/KNSWEhlHN3mz++MJAqrzKxtfLlNJ0FXjY+UARhTeZuIL40KNhi2x5dN445NvXAqkYjM9f7tNWe/7XLb6UF60/0XYFRqcXHDE9sXRR1iuJIkB0RPBH8BBZS8etFobTcNkL8RfBV15BRd4sVxNZGJ77nNoaIUInp2kAR0tw2/F1aIk8wGE2mxvF0CxzM7RMAp4+v8/Fp2cjkeNIhDP1uLorN8XseUFF3qe73Pw9bu/8pnblg==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "<eagostini@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Elena Agostini <eagostini@nvidia.com>",
        "Date": "Mon, 8 Nov 2021 18:58:04 +0000",
        "Message-ID": "<20211108185805.3887-9-eagostini@nvidia.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20211108185805.3887-1-eagostini@nvidia.com>",
        "References": "<20210602203531.2288645-1-thomas@monjalon.net>\n <20211108185805.3887-1-eagostini@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "74bba9df-e19d-4e42-4752-08d9a2a51d72",
        "X-MS-TrafficTypeDiagnostic": "BY5PR12MB4243:",
        "X-Microsoft-Antispam-PRVS": "\n <BY5PR12MB4243217F781CCFC659539CBACD919@BY5PR12MB4243.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:962;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n nFtp4+7ArNY+I+5f/BjaEUiksqnoPjAqso6FukTpF5n7OJXMlKe+GYShzkXmFzHJRmrTKU2FbThsYAZkIrnI1WPYCqCUwFcKbvlQtKxyrn9FKvWZJ1R7PVYNt9du/DpbgmcH4Cz9Efv+FEBqw15OL91ypZ0IHpbtZhaEETyhICdw3GuTGQuDZlu0VlkCONx9/YjsjWZ3FknU6IAK8p8Vn8FC+TZlg+1CFtJpD1PsDpu8a4ZBJE7pJEgnu5EIyfibSoukqdE8duZpXrz8nWJx2gVnf06ndwC8AXgAezn2dMwiWgWM5HKGPqlRGjsyaHWT9qmHnMoKRkm7yKh/YM1f4vsciJ7Arw5hDcHtuNqM3EwW0t/C0hP6D7vXa44jrf3oI8rhAG1lR5t8eADRQAO9t1homobVWoYOHfWSTaONUBaq6vd54zGQZ8IcukOS/+f2wehyj/qECaD3w9v+Qiebz3AZaP6GdokDG0jhRISRACaELCS9TCJNLfAUysNuHaf78aT9WyBrvh14IEVor984cPrCuhqUdbpHR5Zb4b6ELZArTgFAnkPD6ByACeCh3nlF/KNsnLz+m7iYbK+pcI6vp6ujY0FOn06iMFf6e/6PQUr0DT6qGBPKfHQxtlDtRLCMTVuuyG3TaLxdh2H30JXCBQwF7lCjzViJldcnWrvgKJeYZ5UxvnC3WKuiTev61qcPQ75xrZoROw7K3pBeM9STeQ==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(7636003)(356005)(82310400003)(7696005)(26005)(426003)(4326008)(83380400001)(16526019)(2616005)(336012)(508600001)(6286002)(186003)(316002)(8936002)(2906002)(86362001)(36860700001)(70586007)(70206006)(36756003)(2876002)(30864003)(1076003)(8676002)(5660300002)(55016002)(107886003)(6916009)(47076005);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "08 Nov 2021 10:47:09.5968 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n 74bba9df-e19d-4e42-4752-08d9a2a51d72",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT064.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "BY5PR12MB4243",
        "Subject": "[dpdk-dev] [PATCH v5 8/9] gpudev: add communication list",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Elena Agostini <eagostini@nvidia.com>\n\nIn heterogeneous computing system, processing is not only in the CPU.\nSome tasks can be delegated to devices working in parallel.\nWhen mixing network activity with task processing there may be the need\nto put in communication the CPU with the device in order to synchronize\noperations.\n\nAn example could be a receive-and-process application\nwhere CPU is responsible for receiving packets in multiple mbufs\nand the GPU is responsible for processing the content of those packets.\n\nThe purpose of this list is to provide a buffer in CPU memory visible\nfrom the GPU that can be treated as a circular buffer\nto let the CPU provide fondamental info of received packets to the GPU.\n\nA possible use-case is described below.\n\nCPU:\n- Trigger some task on the GPU\n- in a loop:\n    - receive a number of packets\n    - provide packets info to the GPU\n\nGPU:\n- Do some pre-processing\n- Wait to receive a new set of packet to be processed\n\nLayout of a communication list would be:\n\n     -------\n    |   0    | => pkt_list\n    | status |\n    | #pkts  |\n     -------\n    |   1    | => pkt_list\n    | status |\n    | #pkts  |\n     -------\n    |   2    | => pkt_list\n    | status |\n    | #pkts  |\n     -------\n    |  ....  | => pkt_list\n     -------\n\nSigned-off-by: Elena Agostini <eagostini@nvidia.com>\n---\n app/test-gpudev/main.c                 |  95 ++++++++++++++\n doc/guides/prog_guide/gpudev.rst       |  16 +++\n doc/guides/rel_notes/release_21_11.rst |   2 +-\n lib/gpudev/gpudev.c                    | 164 +++++++++++++++++++++++++\n lib/gpudev/meson.build                 |   2 +\n lib/gpudev/rte_gpudev.h                | 129 +++++++++++++++++++\n lib/gpudev/version.map                 |   4 +\n 7 files changed, 411 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/app/test-gpudev/main.c b/app/test-gpudev/main.c\nindex 516a01b927..111ed6d415 100644\n--- a/app/test-gpudev/main.c\n+++ b/app/test-gpudev/main.c\n@@ -209,6 +209,100 @@ create_update_comm_flag(uint16_t gpu_id)\n \treturn 0;\n }\n \n+static int\n+simulate_gpu_task(struct rte_gpu_comm_list *comm_list_item, int num_pkts)\n+{\n+\tint idx;\n+\n+\tif (comm_list_item == NULL)\n+\t\treturn -1;\n+\n+\tfor (idx = 0; idx < num_pkts; idx++) {\n+\t\t/**\n+\t\t * consume(comm_list_item->pkt_list[idx].addr);\n+\t\t */\n+\t}\n+\tcomm_list_item->status = RTE_GPU_COMM_LIST_DONE;\n+\n+\treturn 0;\n+}\n+\n+static int\n+create_update_comm_list(uint16_t gpu_id)\n+{\n+\tint ret = 0;\n+\tint i = 0;\n+\tstruct rte_gpu_comm_list *comm_list;\n+\tuint32_t num_comm_items = 1024;\n+\tstruct rte_mbuf *mbufs[10];\n+\n+\tprintf(\"\\n=======> TEST: Communication list\\n\");\n+\n+\tcomm_list = rte_gpu_comm_create_list(gpu_id, num_comm_items);\n+\tif (comm_list == NULL) {\n+\t\tfprintf(stderr, \"rte_gpu_comm_create_list returned error %d\\n\", ret);\n+\t\treturn -1;\n+\t}\n+\n+\t/**\n+\t * Simulate DPDK receive functions like rte_eth_rx_burst()\n+\t */\n+\tfor (i = 0; i < 10; i++) {\n+\t\tmbufs[i] = rte_zmalloc(NULL, sizeof(struct rte_mbuf), 0);\n+\t\tif (mbufs[i] == NULL) {\n+\t\t\tfprintf(stderr, \"Failed to allocate fake mbufs in CPU memory.\\n\");\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\tmemset(mbufs[i], 0, sizeof(struct rte_mbuf));\n+\t}\n+\n+\t/**\n+\t * Populate just the first item of  the list\n+\t */\n+\tret = rte_gpu_comm_populate_list_pkts(&(comm_list[0]), mbufs, 10);\n+\tif (ret < 0) {\n+\t\tfprintf(stderr, \"rte_gpu_comm_populate_list_pkts returned error %d\\n\", ret);\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_gpu_comm_cleanup_list(&(comm_list[0]));\n+\tif (ret == 0) {\n+\t\tfprintf(stderr, \"rte_gpu_comm_cleanup_list erroneusly cleaned the list even if packets have not beeing consumed yet\\n\");\n+\t\treturn -1;\n+\t} else {\n+\t\tfprintf(stderr, 
\"rte_gpu_comm_cleanup_list correctly didn't clean up the packets because they have not beeing consumed yet\\n\");\n+\t}\n+\n+\t/**\n+\t * Simulate a GPU tasks going through the packet list to consume\n+\t * mbufs packets and release them\n+\t */\n+\tsimulate_gpu_task(&(comm_list[0]), 10);\n+\n+\t/**\n+\t * Packets have been consumed, now the communication item\n+\t * and the related mbufs can be all released\n+\t */\n+\tret = rte_gpu_comm_cleanup_list(&(comm_list[0]));\n+\tif (ret < 0) {\n+\t\tfprintf(stderr, \"rte_gpu_comm_cleanup_list returned error %d\\n\", ret);\n+\t\treturn -1;\n+\t}\n+\n+\tret = rte_gpu_comm_destroy_list(comm_list, num_comm_items);\n+\tif (ret < 0) {\n+\t\tfprintf(stderr, \"rte_gpu_comm_destroy_list returned error %d\\n\", ret);\n+\t\treturn -1;\n+\t}\n+\n+\tfor (i = 0; i < 10; i++)\n+\t\trte_free(mbufs[i]);\n+\n+\tprintf(\"\\nCommunication list test passed!\\n\");\n+\treturn 0;\n+}\n+\n int\n main(int argc, char **argv)\n {\n@@ -263,6 +357,7 @@ main(int argc, char **argv)\n \t * Communication items test\n \t */\n \tcreate_update_comm_flag(gpu_id);\n+\tcreate_update_comm_list(gpu_id);\n \n \t/* clean up the EAL */\n \trte_eal_cleanup();\ndiff --git a/doc/guides/prog_guide/gpudev.rst b/doc/guides/prog_guide/gpudev.rst\nindex e0db627aed..cbaec5a1e4 100644\n--- a/doc/guides/prog_guide/gpudev.rst\n+++ b/doc/guides/prog_guide/gpudev.rst\n@@ -86,3 +86,19 @@ that's waiting to receive a signal from the CPU\n to move forward with the execution.\n The communication flag allocates a CPU memory GPU-visible ``uint32_t`` flag\n that can be used by the CPU to communicate with a GPU task.\n+\n+Communication list\n+~~~~~~~~~~~~~~~~~~\n+\n+By default, DPDK pulls free mbufs from a mempool to receive packets.\n+Best practice, expecially in a multithreaded application,\n+is to no make any assumption on which mbufs will be used\n+to receive the next bursts of packets.\n+Considering an application with a GPU memory mempool\n+attached to a receive queue having some 
task waiting on the GPU\n+to receive a new burst of packets to be processed,\n+there is the need to communicate from the CPU\n+the list of mbuf payload addresses where received packet have been stored.\n+The ``rte_gpu_comm_*()`` functions are responsible to create a list of packets\n+that can be populated with receive mbuf payload addresses\n+and communicated to the task running on the GPU.\ndiff --git a/doc/guides/rel_notes/release_21_11.rst b/doc/guides/rel_notes/release_21_11.rst\nindex 78b29d9a25..23d8591f40 100644\n--- a/doc/guides/rel_notes/release_21_11.rst\n+++ b/doc/guides/rel_notes/release_21_11.rst\n@@ -105,7 +105,7 @@ New Features\n \n   * Device information\n   * Memory management\n-  * Communication flag\n+  * Communication flag & list\n \n * **Added new RSS offload types for IPv4/L4 checksum in RSS flow.**\n \ndiff --git a/lib/gpudev/gpudev.c b/lib/gpudev/gpudev.c\nindex f887f3dd93..88148eb704 100644\n--- a/lib/gpudev/gpudev.c\n+++ b/lib/gpudev/gpudev.c\n@@ -735,3 +735,167 @@ rte_gpu_comm_get_flag_value(struct rte_gpu_comm_flag *devflag, uint32_t *val)\n \n \treturn 0;\n }\n+\n+struct rte_gpu_comm_list *\n+rte_gpu_comm_create_list(uint16_t dev_id,\n+\t\tuint32_t num_comm_items)\n+{\n+\tstruct rte_gpu_comm_list *comm_list;\n+\tuint32_t idx_l;\n+\tint ret;\n+\tstruct rte_gpu *dev;\n+\n+\tif (num_comm_items == 0) {\n+\t\trte_errno = EINVAL;\n+\t\treturn NULL;\n+\t}\n+\n+\tdev = gpu_get_by_id(dev_id);\n+\tif (dev == NULL) {\n+\t\tGPU_LOG(ERR, \"memory barrier for invalid device ID %d\", dev_id);\n+\t\trte_errno = ENODEV;\n+\t\treturn NULL;\n+\t}\n+\n+\tcomm_list = rte_zmalloc(NULL, sizeof(struct rte_gpu_comm_list) * num_comm_items, 0);\n+\tif (comm_list == NULL) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tret = rte_gpu_register(dev_id, sizeof(struct rte_gpu_comm_list) * num_comm_items, comm_list);\n+\tif (ret < 0) {\n+\t\trte_errno = ENOMEM;\n+\t\treturn NULL;\n+\t}\n+\n+\tfor (idx_l = 0; idx_l < num_comm_items; idx_l++) 
{\n+\t\tcomm_list[idx_l].pkt_list = rte_zmalloc(NULL, sizeof(struct rte_gpu_comm_pkt) * RTE_GPU_COMM_LIST_PKTS_MAX, 0);\n+\t\tif (comm_list[idx_l].pkt_list == NULL) {\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tret = rte_gpu_register(dev_id, sizeof(struct rte_gpu_comm_pkt) * RTE_GPU_COMM_LIST_PKTS_MAX, comm_list[idx_l].pkt_list);\n+\t\tif (ret < 0) {\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn NULL;\n+\t\t}\n+\n+\t\tRTE_GPU_VOLATILE(comm_list[idx_l].status) = RTE_GPU_COMM_LIST_FREE;\n+\t\tcomm_list[idx_l].num_pkts = 0;\n+\t\tcomm_list[idx_l].dev_id = dev_id;\n+\n+\t\tcomm_list[idx_l].mbufs = rte_zmalloc(NULL, sizeof(struct rte_mbuf *) * RTE_GPU_COMM_LIST_PKTS_MAX, 0);\n+\t\tif (comm_list[idx_l].mbufs == NULL) {\n+\t\t\trte_errno = ENOMEM;\n+\t\t\treturn NULL;\n+\t\t}\n+\t}\n+\n+\treturn comm_list;\n+}\n+\n+int\n+rte_gpu_comm_destroy_list(struct rte_gpu_comm_list *comm_list,\n+\t\tuint32_t num_comm_items)\n+{\n+\tuint32_t idx_l;\n+\tint ret;\n+\tuint16_t dev_id;\n+\n+\tif (comm_list == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tdev_id = comm_list[0].dev_id;\n+\n+\tfor (idx_l = 0; idx_l < num_comm_items; idx_l++) {\n+\t\tret = rte_gpu_unregister(dev_id, comm_list[idx_l].pkt_list);\n+\t\tif (ret < 0) {\n+\t\t\trte_errno = EINVAL;\n+\t\t\treturn -1;\n+\t\t}\n+\n+\t\trte_free(comm_list[idx_l].pkt_list);\n+\t\trte_free(comm_list[idx_l].mbufs);\n+\t}\n+\n+\tret = rte_gpu_unregister(dev_id, comm_list);\n+\tif (ret < 0) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -1;\n+\t}\n+\n+\trte_free(comm_list);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_gpu_comm_populate_list_pkts(struct rte_gpu_comm_list *comm_list_item,\n+\t\tstruct rte_mbuf **mbufs, uint32_t num_mbufs)\n+{\n+\tuint32_t idx;\n+\n+\tif (comm_list_item == NULL || comm_list_item->pkt_list == NULL ||\n+\t\t\tmbufs == NULL || num_mbufs > RTE_GPU_COMM_LIST_PKTS_MAX) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tfor (idx = 0; idx < num_mbufs; idx++) {\n+\t\t/* 
support only unchained mbufs */\n+\t\tif (unlikely((mbufs[idx]->nb_segs > 1) ||\n+\t\t\t\t(mbufs[idx]->next != NULL) ||\n+\t\t\t\t(mbufs[idx]->data_len != mbufs[idx]->pkt_len))) {\n+\t\t\trte_errno = ENOTSUP;\n+\t\t\treturn -rte_errno;\n+\t\t}\n+\t\tcomm_list_item->pkt_list[idx].addr =\n+\t\t\t\trte_pktmbuf_mtod_offset(mbufs[idx], uintptr_t, 0);\n+\t\tcomm_list_item->pkt_list[idx].size = mbufs[idx]->pkt_len;\n+\t\tcomm_list_item->mbufs[idx] = mbufs[idx];\n+\t}\n+\n+\tRTE_GPU_VOLATILE(comm_list_item->num_pkts) = num_mbufs;\n+\trte_gpu_mbw(comm_list_item->dev_id);\n+\tRTE_GPU_VOLATILE(comm_list_item->status) = RTE_GPU_COMM_LIST_READY;\n+\trte_gpu_mbw(comm_list_item->dev_id);\n+\n+\treturn 0;\n+}\n+\n+int\n+rte_gpu_comm_cleanup_list(struct rte_gpu_comm_list *comm_list_item)\n+{\n+\tuint32_t idx = 0;\n+\n+\tif (comm_list_item == NULL) {\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tif (RTE_GPU_VOLATILE(comm_list_item->status) ==\n+\t\t\tRTE_GPU_COMM_LIST_READY) {\n+\t\tGPU_LOG(ERR, \"packet list is still in progress\");\n+\t\trte_errno = EINVAL;\n+\t\treturn -rte_errno;\n+\t}\n+\n+\tfor (idx = 0; idx < RTE_GPU_COMM_LIST_PKTS_MAX; idx++) {\n+\t\tif (comm_list_item->pkt_list[idx].addr == 0)\n+\t\t\tbreak;\n+\n+\t\tcomm_list_item->pkt_list[idx].addr = 0;\n+\t\tcomm_list_item->pkt_list[idx].size = 0;\n+\t\tcomm_list_item->mbufs[idx] = NULL;\n+\t}\n+\n+\tRTE_GPU_VOLATILE(comm_list_item->status) = RTE_GPU_COMM_LIST_FREE;\n+\tRTE_GPU_VOLATILE(comm_list_item->num_pkts) = 0;\n+\trte_mb();\n+\n+\treturn 0;\n+}\ndiff --git a/lib/gpudev/meson.build b/lib/gpudev/meson.build\nindex 608154817b..89a118f357 100644\n--- a/lib/gpudev/meson.build\n+++ b/lib/gpudev/meson.build\n@@ -8,3 +8,5 @@ headers = files(\n sources = files(\n         'gpudev.c',\n )\n+\n+deps += ['mbuf']\ndiff --git a/lib/gpudev/rte_gpudev.h b/lib/gpudev/rte_gpudev.h\nindex 1466ac164b..3023154be8 100644\n--- a/lib/gpudev/rte_gpudev.h\n+++ b/lib/gpudev/rte_gpudev.h\n@@ -9,6 +9,7 @@\n #include 
<stdint.h>\n #include <stdbool.h>\n \n+#include <rte_mbuf.h>\n #include <rte_bitops.h>\n #include <rte_compat.h>\n \n@@ -41,6 +42,9 @@ extern \"C\" {\n /** Access variable as volatile. */\n #define RTE_GPU_VOLATILE(x) (*(volatile typeof(x) *)&(x))\n \n+/** Max number of packets per communication list. */\n+#define RTE_GPU_COMM_LIST_PKTS_MAX 1024\n+\n /** Store device info. */\n struct rte_gpu_info {\n \t/** Unique identifier name. */\n@@ -87,6 +91,43 @@ struct rte_gpu_comm_flag {\n \tenum rte_gpu_comm_flag_type mtype;\n };\n \n+/** List of packets shared among CPU and device. */\n+struct rte_gpu_comm_pkt {\n+\t/** Address of the packet in memory (e.g. mbuf->buf_addr). */\n+\tuintptr_t addr;\n+\t/** Size in byte of the packet. */\n+\tsize_t size;\n+};\n+\n+/** Possible status for the list of packets shared among CPU and device. */\n+enum rte_gpu_comm_list_status {\n+\t/** Packet list can be filled with new mbufs, no one is using it. */\n+\tRTE_GPU_COMM_LIST_FREE = 0,\n+\t/** Packet list has been filled with new mbufs and it's ready to be used .*/\n+\tRTE_GPU_COMM_LIST_READY,\n+\t/** Packet list has been processed, it's ready to be freed. */\n+\tRTE_GPU_COMM_LIST_DONE,\n+\t/** Some error occurred during packet list processing. */\n+\tRTE_GPU_COMM_LIST_ERROR,\n+};\n+\n+/**\n+ * Communication list holding a number of lists of packets\n+ * each having a status flag.\n+ */\n+struct rte_gpu_comm_list {\n+\t/** Device that will use the communication list. */\n+\tuint16_t dev_id;\n+\t/** List of mbufs populated by the CPU with a set of mbufs. */\n+\tstruct rte_mbuf **mbufs;\n+\t/** List of packets populated by the CPU with a set of mbufs info. */\n+\tstruct rte_gpu_comm_pkt *pkt_list;\n+\t/** Number of packets in the list. */\n+\tuint32_t num_pkts;\n+\t/** Status of the list. 
*/\n+\tenum rte_gpu_comm_list_status status;\n+};\n+\n /**\n  * @warning\n  * @b EXPERIMENTAL: this API may change without prior notice.\n@@ -513,6 +554,94 @@ __rte_experimental\n int rte_gpu_comm_get_flag_value(struct rte_gpu_comm_flag *devflag,\n \t\tuint32_t *val);\n \n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Create a communication list that can be used to share packets\n+ * between CPU and device.\n+ * Each element of the list contains:\n+ *  - a packet list of RTE_GPU_COMM_LIST_PKTS_MAX elements\n+ *  - number of packets in the list\n+ *  - a status flag to communicate if the packet list is FREE,\n+ *    READY to be processed, DONE with processing.\n+ *\n+ * The list is allocated in CPU-visible memory.\n+ * At creation time, every list is in FREE state.\n+ *\n+ * @param dev_id\n+ *   Reference device ID.\n+ * @param num_comm_items\n+ *   Number of items in the communication list.\n+ *\n+ * @return\n+ *   A pointer to the allocated list, otherwise NULL and rte_errno is set:\n+ *   - EINVAL if invalid input params\n+ */\n+__rte_experimental\n+struct rte_gpu_comm_list *rte_gpu_comm_create_list(uint16_t dev_id,\n+\t\tuint32_t num_comm_items);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Destroy a communication list.\n+ *\n+ * @param comm_list\n+ *   Communication list to be destroyed.\n+ * @param num_comm_items\n+ *   Number of items in the communication list.\n+ *\n+ * @return\n+ *   0 on success, -rte_errno otherwise:\n+ *   - EINVAL if invalid input params\n+ */\n+__rte_experimental\n+int rte_gpu_comm_destroy_list(struct rte_gpu_comm_list *comm_list,\n+\t\tuint32_t num_comm_items);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Populate the packets list of the communication item\n+ * with info from a list of mbufs.\n+ * Status flag of that packet list is set to READY.\n+ *\n+ * @param comm_list_item\n+ *   
Communication list item to fill.\n+ * @param mbufs\n+ *   List of mbufs.\n+ * @param num_mbufs\n+ *   Number of mbufs.\n+ *\n+ * @return\n+ *   0 on success, -rte_errno otherwise:\n+ *   - EINVAL if invalid input params\n+ *   - ENOTSUP if mbufs are chained (multiple segments)\n+ */\n+__rte_experimental\n+int rte_gpu_comm_populate_list_pkts(struct rte_gpu_comm_list *comm_list_item,\n+\t\tstruct rte_mbuf **mbufs, uint32_t num_mbufs);\n+\n+/**\n+ * @warning\n+ * @b EXPERIMENTAL: this API may change without prior notice.\n+ *\n+ * Reset a communication list item to the original state.\n+ * The status flag set to FREE and mbufs are returned to the pool.\n+ *\n+ * @param comm_list_item\n+ *   Communication list item to reset.\n+ *\n+ * @return\n+ *   0 on success, -rte_errno otherwise:\n+ *   - EINVAL if invalid input params\n+ */\n+__rte_experimental\n+int rte_gpu_comm_cleanup_list(struct rte_gpu_comm_list *comm_list_item);\n+\n #ifdef __cplusplus\n }\n #endif\ndiff --git a/lib/gpudev/version.map b/lib/gpudev/version.map\nindex 2fc039373a..45a35fa6e4 100644\n--- a/lib/gpudev/version.map\n+++ b/lib/gpudev/version.map\n@@ -6,9 +6,13 @@ EXPERIMENTAL {\n \trte_gpu_callback_register;\n \trte_gpu_callback_unregister;\n \trte_gpu_close;\n+\trte_gpu_comm_cleanup_list;\n \trte_gpu_comm_create_flag;\n+\trte_gpu_comm_create_list;\n \trte_gpu_comm_destroy_flag;\n+\trte_gpu_comm_destroy_list;\n \trte_gpu_comm_get_flag_value;\n+\trte_gpu_comm_populate_list_pkts;\n \trte_gpu_comm_set_flag;\n \trte_gpu_count_avail;\n \trte_gpu_find_next;\n",
    "prefixes": [
        "v5",
        "8/9"
    ]
}