get:
Show a patch.

patch:
Partially update a patch (only the fields being changed are sent).

put:
Update a patch.
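
Updates require authentication. Below is a minimal sketch of a partial update
using Python's requests library; the token value is a placeholder (real tokens
come from a Patchwork user profile), and only the fields being changed are sent:

import requests

TOKEN = "0123456789abcdef"  # placeholder API token
URL = "https://patches.dpdk.org/api/patches/103979/"

# PATCH performs a partial update: set the patch state and archive it.
resp = requests.patch(
    URL,
    headers={"Authorization": f"Token {TOKEN}"},
    json={"state": "accepted", "archived": True},
)
resp.raise_for_status()
print(resp.json()["state"])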

GET /api/patches/103979/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 103979,
    "url": "http://patches.dpdk.org/api/patches/103979/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20211108185805.3887-10-eagostini@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20211108185805.3887-10-eagostini@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20211108185805.3887-10-eagostini@nvidia.com",
    "date": "2021-11-08T18:58:05",
    "name": "[v5,9/9] doc: add CUDA example in GPU guide",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "742c7eff44aa42183473f133eb96a82151f6b4fc",
    "submitter": {
        "id": 1571,
        "url": "http://patches.dpdk.org/api/people/1571/?format=api",
        "name": "Elena Agostini",
        "email": "eagostini@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20211108185805.3887-10-eagostini@nvidia.com/mbox/",
    "series": [
        {
            "id": 20381,
            "url": "http://patches.dpdk.org/api/series/20381/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=20381",
            "date": "2021-11-08T18:57:56",
            "name": "GPU library",
            "version": 5,
            "mbox": "http://patches.dpdk.org/series/20381/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/103979/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/103979/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 4DBD1A0C4D;\n\tMon,  8 Nov 2021 11:48:15 +0100 (CET)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 8FEE241160;\n\tMon,  8 Nov 2021 11:47:16 +0100 (CET)",
            "from NAM12-DM6-obe.outbound.protection.outlook.com\n (mail-dm6nam12on2082.outbound.protection.outlook.com [40.107.243.82])\n by mails.dpdk.org (Postfix) with ESMTP id 2206D41135\n for <dev@dpdk.org>; Mon,  8 Nov 2021 11:47:12 +0100 (CET)",
            "from MW2PR16CA0052.namprd16.prod.outlook.com (2603:10b6:907:1::29)\n by MN2PR12MB4286.namprd12.prod.outlook.com (2603:10b6:208:199::22) with\n Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.11; Mon, 8 Nov\n 2021 10:47:11 +0000",
            "from CO1NAM11FT064.eop-nam11.prod.protection.outlook.com\n (2603:10b6:907:1:cafe::1f) by MW2PR16CA0052.outlook.office365.com\n (2603:10b6:907:1::29) with Microsoft SMTP Server (version=TLS1_2,\n cipher=TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384) id 15.20.4669.10 via Frontend\n Transport; Mon, 8 Nov 2021 10:47:10 +0000",
            "from mail.nvidia.com (216.228.112.34) by\n CO1NAM11FT064.mail.protection.outlook.com (10.13.175.77) with Microsoft SMTP\n Server (version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384) id\n 15.20.4669.10 via Frontend Transport; Mon, 8 Nov 2021 10:47:10 +0000",
            "from nvidia.com (172.20.187.6) by HQMAIL107.nvidia.com\n (172.20.187.13) with Microsoft SMTP Server (TLS) id 15.0.1497.18; Mon, 8 Nov\n 2021 10:47:02 +0000"
        ],
        "ARC-Seal": "i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none;\n b=YcLK8JYYE23Sf384Hufl3pkOdhu/q7UI5dTd0vzpteNV8IfYScUzfOcDhjbSgK8AZEHZ8ubXjOVDs71neViEh5p6I0uAv3ZrJGWeyAxKbWJCWr2+T36NZ9Z88TSueUDoyR0creJZZHnuuAdGM6sMy0pE4VTnhe/aHEdp7/OYlEnCDZYemRrfycWrnxgesgfMd2sfGr/+vc2Lqxn6QwOmxL/kD9X0BZwrBUntAxvP7lOjkmCHFQSszK9pXUBj1VaOKYwVj6zNuA9DpuJ4n4AAleQ0uXUqd0ZOwIV0otAmGxPJwcqT54+DQ+TdMR+wCMcXTQclqg8ymJ8z8VHAJqapug==",
        "ARC-Message-Signature": "i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com;\n s=arcselector9901;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-AntiSpam-MessageData-ChunkCount:X-MS-Exchange-AntiSpam-MessageData-0:X-MS-Exchange-AntiSpam-MessageData-1;\n bh=OxtiHhow+oMMccuwlq9p6l5sIJT9EIjR/wpVQcG82Kw=;\n b=ab0b/L2cjd7jwKCx2i+8/ZLSTTodqJ3S23PKykTRaxlPULHn2L/0VUM5xjfz4qm/uAacJManN1+edSbYkbFok+XJEoxxeAh9UO/aMJpnSFsv5Wdg72vmrWcf/6Rsm62lcZyWPJsZfFhgwQOpI6aO9sSYTLGXJrkPAvbG5taQ9DaXUojtCDLl49fqW60yUZHSAiXR5WMxZazffEnZF/yJOBAe8I6Y90M7Xk0WDHbn9xXScA3yk6OngiS0u8+8ouZwufQXyRhEnRklaot5EWOaMzvlktgBZGWI4JY4k51e3Sq4JAyaG759Ga3z9s91d562S3YThLiviqxjSc0kd6WH+g==",
        "ARC-Authentication-Results": "i=1; mx.microsoft.com 1; spf=pass (sender ip is\n 216.228.112.34) smtp.rcpttodomain=dpdk.org smtp.mailfrom=nvidia.com;\n dmarc=pass (p=quarantine sp=quarantine pct=100) action=none\n header.from=nvidia.com; dkim=none (message not signed); arc=none",
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=Nvidia.com;\n s=selector2;\n h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck;\n bh=OxtiHhow+oMMccuwlq9p6l5sIJT9EIjR/wpVQcG82Kw=;\n b=mNQ2s0gO3/+0HlGrzjc1TY2N9fVZPOscc/+TkqbfzOdnLvMvdkU3LDLAvU+3VQ4JoAu6/eBxZonKw4FoKB32Iv40kLr2aqhokHzSCczM6R1j+O3SlevIiOtUp7Msn6OYAMC88bB4SSkhdbkvB4o+YFaBu/h1h95qOLLeIYqiqYoqPYOGnOVmEASS/DVdaoEH+15tFZ7Imup37Vnn9ARLg52rEwMiPP1bq2UDFx1CvIFHswY1Dw8mRQLN73P0N7tUFgMAQathP3Z0UZHYOhT2YgBWQQdCMjG7yu0u/WCsAlHwOp1NsSWn4apk6Z28cxjkqLAZV1VBO1e/HZ4/KhcGFQ==",
        "X-MS-Exchange-Authentication-Results": "spf=pass (sender IP is 216.228.112.34)\n smtp.mailfrom=nvidia.com; dpdk.org; dkim=none (message not signed)\n header.d=none;dpdk.org; dmarc=pass action=none header.from=nvidia.com;",
        "Received-SPF": "Pass (protection.outlook.com: domain of nvidia.com designates\n 216.228.112.34 as permitted sender) receiver=protection.outlook.com;\n client-ip=216.228.112.34; helo=mail.nvidia.com;",
        "From": "<eagostini@nvidia.com>",
        "To": "<dev@dpdk.org>",
        "CC": "Elena Agostini <eagostini@nvidia.com>",
        "Date": "Mon, 8 Nov 2021 18:58:05 +0000",
        "Message-ID": "<20211108185805.3887-10-eagostini@nvidia.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20211108185805.3887-1-eagostini@nvidia.com>",
        "References": "<20210602203531.2288645-1-thomas@monjalon.net>\n <20211108185805.3887-1-eagostini@nvidia.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Originating-IP": "[172.20.187.6]",
        "X-ClientProxiedBy": "HQMAIL105.nvidia.com (172.20.187.12) To\n HQMAIL107.nvidia.com (172.20.187.13)",
        "X-EOPAttributedMessage": "0",
        "X-MS-PublicTrafficType": "Email",
        "X-MS-Office365-Filtering-Correlation-Id": "e9d997b5-059e-4539-3f2f-08d9a2a51de7",
        "X-MS-TrafficTypeDiagnostic": "MN2PR12MB4286:",
        "X-Microsoft-Antispam-PRVS": "\n <MN2PR12MB4286605BF0A612AA2142D408CD919@MN2PR12MB4286.namprd12.prod.outlook.com>",
        "X-MS-Oob-TLC-OOBClassifiers": "OLM:6430;",
        "X-MS-Exchange-SenderADCheck": "1",
        "X-MS-Exchange-AntiSpam-Relay": "0",
        "X-Microsoft-Antispam": "BCL:0;",
        "X-Microsoft-Antispam-Message-Info": "\n x5lmRAxraCZ+CvfCzB+rfg3862p9FIyBYqB/2I9c097MhlHqMxr38elTNATXFwrCxlfKHFXWGyUDe7/P+NkmqU3dIwSaDzp1Lc7ntPj0OrDEoJFaop2febJ+z38+qST2THS0077k1MqgrqX8cG2QjHecbfr2SwzDwWNYuf6mIYD46cXKiksF8zOiYsU+G3njTq27VnFhhX2JKh3M0fbFtfXBTNAALJ8jKrGOHx4XXZ4revOdIM3CWVUMEHEmGUxXE6lmfCnOj3rNlBcUfeIJXfq5ck5rZRftsybxCiUP2/lS16rFyiTUmhnNbt77UU8wS0d5/xeSS3RWedTOwAEjeyPUNuvXZ7l+fy3GeJ/FoFKn/JBgXnmzjPExnD7MA5sqWKwWX2rdH1zTOkahZko7M34zdFSJEFwAPa7uPfNlwCPKfsTKFFRvNlzwuY2FW4Dqw2N2ul2UNdH6pg0ZF8t7kGNMHfXcseQbbkEEYG2rFHurm8wVN+Tkw/8vDgU5Wd8XbUEJ8VR7h+dWHKLxPthlRqsZ9A51v6t21DXFfndlMMrEPjx15OXehRFoPSQXdIAMTmwGWmrFrrEQxqmV+YBtNPzCSle+u2x24tdSHB7kJfyTNJVQDBDSnbKrpcxfHsXOALYSQLp8gyaqK7ZvJ5h7kz6TsDeLFpz/2vLjnRHg+KbxwApSumAm4C0/aqgzjP4mkEFybkP8eUNOj8UaY+OE9A==",
        "X-Forefront-Antispam-Report": "CIP:216.228.112.34; CTRY:US; LANG:en; SCL:1;\n SRV:;\n IPV:NLI; SFV:NSPM; H:mail.nvidia.com; PTR:schybrid03.nvidia.com; CAT:NONE;\n SFS:(4636009)(46966006)(36840700001)(2876002)(336012)(55016002)(86362001)(7696005)(36860700001)(16526019)(186003)(2616005)(8936002)(6916009)(426003)(36756003)(82310400003)(26005)(316002)(8676002)(70586007)(70206006)(508600001)(83380400001)(47076005)(356005)(1076003)(7636003)(6286002)(2906002)(107886003)(4326008)(5660300002);\n DIR:OUT; SFP:1101;",
        "X-OriginatorOrg": "Nvidia.com",
        "X-MS-Exchange-CrossTenant-OriginalArrivalTime": "08 Nov 2021 10:47:10.3554 (UTC)",
        "X-MS-Exchange-CrossTenant-Network-Message-Id": "\n e9d997b5-059e-4539-3f2f-08d9a2a51de7",
        "X-MS-Exchange-CrossTenant-Id": "43083d15-7273-40c1-b7db-39efd9ccc17a",
        "X-MS-Exchange-CrossTenant-OriginalAttributedTenantConnectingIp": "\n TenantId=43083d15-7273-40c1-b7db-39efd9ccc17a; Ip=[216.228.112.34];\n Helo=[mail.nvidia.com]",
        "X-MS-Exchange-CrossTenant-AuthSource": "\n CO1NAM11FT064.eop-nam11.prod.protection.outlook.com",
        "X-MS-Exchange-CrossTenant-AuthAs": "Anonymous",
        "X-MS-Exchange-CrossTenant-FromEntityHeader": "HybridOnPrem",
        "X-MS-Exchange-Transport-CrossTenantHeadersStamped": "MN2PR12MB4286",
        "Subject": "[dpdk-dev] [PATCH v5 9/9] doc: add CUDA example in GPU guide",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Elena Agostini <eagostini@nvidia.com>\n\nSigned-off-by: Elena Agostini <eagostini@nvidia.com>\n---\n doc/guides/prog_guide/gpudev.rst | 122 +++++++++++++++++++++++++++++++\n 1 file changed, 122 insertions(+)",
    "diff": "diff --git a/doc/guides/prog_guide/gpudev.rst b/doc/guides/prog_guide/gpudev.rst\nindex cbaec5a1e4..1baf0c6772 100644\n--- a/doc/guides/prog_guide/gpudev.rst\n+++ b/doc/guides/prog_guide/gpudev.rst\n@@ -102,3 +102,125 @@ the list of mbuf payload addresses where received packet have been stored.\n The ``rte_gpu_comm_*()`` functions are responsible to create a list of packets\n that can be populated with receive mbuf payload addresses\n and communicated to the task running on the GPU.\n+\n+\n+CUDA Example\n+------------\n+\n+In the example below, there is a pseudo-code to give an example\n+about how to use functions in this library in case of a CUDA application.\n+\n+.. code-block:: c\n+\n+   //////////////////////////////////////////////////////////////////////////\n+   ///// gpudev library + CUDA functions\n+   //////////////////////////////////////////////////////////////////////////\n+   #define GPU_PAGE_SHIFT 16\n+   #define GPU_PAGE_SIZE (1UL << GPU_PAGE_SHIFT)\n+\n+   int main() {\n+       struct rte_gpu_flag quit_flag;\n+       struct rte_gpu_comm_list *comm_list;\n+       int nb_rx = 0;\n+       int comm_list_entry = 0;\n+       struct rte_mbuf * rx_mbufs[max_rx_mbufs];\n+       cudaStream_t cstream;\n+       struct rte_mempool *mpool_payload, *mpool_header;\n+       struct rte_pktmbuf_extmem ext_mem;\n+       int16_t dev_id;\n+       int16_t port_id = 0;\n+\n+       /** Initialize CUDA objects (cstream, context, etc..). */\n+       /** Use gpudev library to register a new CUDA context if any */\n+       /** Let's assume the application wants to use the default context of the GPU device 0 */\n+\n+       dev_id = 0;\n+\n+       /**\n+        * Create an external memory mempool using memory allocated on the GPU.\n+        */\n+       ext_mem.elt_size = mbufs_headroom_size;\n+                   ext_mem.buf_len = RTE_ALIGN_CEIL(mbufs_num * ext_mem.elt_size, GPU_PAGE_SIZE);\n+       ext_mem.buf_iova = RTE_BAD_IOVA;\n+       ext_mem.buf_ptr = rte_gpu_malloc(dev_id, ext_mem.buf_len, 0);\n+       rte_extmem_register(ext_mem.buf_ptr, ext_mem.buf_len, NULL, ext_mem.buf_iova, GPU_PAGE_SIZE);\n+       rte_dev_dma_map(rte_eth_devices[port_id].device, ext_mem.buf_ptr, ext_mem.buf_iova, ext_mem.buf_len);\n+       mpool_payload = rte_pktmbuf_pool_create_extbuf(\"gpu_mempool\", mbufs_num,\n+                                                       0, 0, ext_mem.elt_size,\n+                                                       rte_socket_id(), &ext_mem, 1);\n+\n+       /**\n+        * Create CPU - device communication flag. With this flag, the CPU can tell to the CUDA kernel\n+        * to exit from the main loop.\n+        */\n+       rte_gpu_comm_create_flag(dev_id, &quit_flag, RTE_GPU_COMM_FLAG_CPU);\n+       rte_gpu_comm_set_flag(&quit_flag , 0);\n+\n+       /**\n+        * Create CPU - device communication list. Each entry of this list will be populated by the CPU\n+        * with a new set of received mbufs that the CUDA kernel has to process.\n+        */\n+       comm_list = rte_gpu_comm_create_list(dev_id, num_entries);\n+\n+       /** A very simple CUDA kernel with just 1 CUDA block and RTE_GPU_COMM_LIST_PKTS_MAX CUDA threads. 
*/\n+       cuda_kernel_packet_processing<<<1, RTE_GPU_COMM_LIST_PKTS_MAX, 0, cstream>>>(quit_flag->ptr, comm_list, num_entries, ...);\n+\n+       /**\n+        * For simplicity, the CPU here receives only 2 bursts of mbufs.\n+        * In a real application, network activity and device processing should overlap.\n+        */\n+       nb_rx = rte_eth_rx_burst(port_id, queue_id, &(rx_mbufs[0]), max_rx_mbufs);\n+       rte_gpu_comm_populate_list_pkts(comm_list[0], rx_mbufs, nb_rx);\n+       nb_rx = rte_eth_rx_burst(port_id, queue_id, &(rx_mbufs[0]), max_rx_mbufs);\n+       rte_gpu_comm_populate_list_pkts(comm_list[1], rx_mbufs, nb_rx);\n+\n+       /**\n+        * CPU waits for the completion of the packets' processing on the CUDA kernel\n+        * and then it does a cleanup of the received mbufs.\n+        */\n+       while(rte_gpu_comm_cleanup_list(comm_list[0]));\n+       while(rte_gpu_comm_cleanup_list(comm_list[1]));\n+\n+       /** CPU notifies the CUDA kernel that it has to terminate */\n+       rte_gpu_comm_set_flag(&quit_flag, 1);\n+\n+       /** gpudev objects cleanup/destruction */\n+       /** CUDA cleanup */\n+\n+       rte_gpu_free(dev_id, ext_mem.buf_len);\n+\n+       /** DPDK cleanup */\n+\n+       return 0;\n+   }\n+\n+   //////////////////////////////////////////////////////////////////////////\n+   ///// CUDA kernel\n+   //////////////////////////////////////////////////////////////////////////\n+\n+   void cuda_kernel(uint32_t * quit_flag_ptr, struct rte_gpu_comm_list *comm_list, int comm_list_entries) {\n+      int comm_list_index = 0;\n+      struct rte_gpu_comm_pkt *pkt_list = NULL;\n+\n+      /** Do some pre-processing operations. */\n+\n+      /** GPU kernel keeps checking this flag to know if it has to quit or wait for more packets. */\n+      while(*quit_flag_ptr == 0)\n+      {\n+         if(comm_list[comm_list_index]->status != RTE_GPU_COMM_LIST_READY)\n+         continue;\n+\n+         if(threadIdx.x < comm_list[comm_list_index]->num_pkts)\n+         {\n+            /** Each CUDA thread processes a different packet. */\n+            packet_processing(comm_list[comm_list_index]->addr, comm_list[comm_list_index]->size, ..);\n+         }\n+         __threadfence();\n+         __syncthreads();\n+\n+         /** Wait for new packets on the next communication list entry. */\n+         comm_list_index = (comm_list_index+1) % comm_list_entries;\n+      }\n+\n+      /** Do some post-processing operations. */\n+   }\n",
    "prefixes": [
        "v5",
        "9/9"
    ]
}
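
For read-only tooling, the response above can be consumed directly. A minimal
sketch, again using Python's requests library, that prints the patch status and
saves the raw patch via the "mbox" field:

import requests

api_url = "https://patches.dpdk.org/api/patches/103979/"
patch = requests.get(api_url, timeout=30).json()

# Fields taken from the response shown above.
print(patch["name"])
print("state:", patch["state"], "| check:", patch["check"])

# "mbox" points at the raw patch email, suitable for `git am`.
mbox = requests.get(patch["mbox"], timeout=30)
with open("patch.mbox", "wb") as handle:
    handle.write(mbox.content)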