get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
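
As an illustration, the patch shown below can be fetched programmatically. This is a minimal sketch, assuming the third-party requests library is installed; the patch ID and the field names it reads come from the example response that follows:

import requests  # third-party HTTP client, assumed available

# Anonymous (read-only) access is sufficient for GET.
resp = requests.get("http://patches.dpdk.org/api/patches/77302/")
resp.raise_for_status()
patch = resp.json()

print(patch["name"])   # "[V1,1/2] framework: use the default python lib threadpool ..."
print(patch["state"])  # "accepted"
print(patch["mbox"])   # URL of the raw mbox, suitable for git am

The raw request and response for the same patch look like this: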

GET /api/patches/77302/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 77302,
    "url": "http://patches.dpdk.org/api/patches/77302/?format=api",
    "web_url": "http://patches.dpdk.org/project/dts/patch/1599764335-1164-2-git-send-email-lihongx.ma@intel.com/",
    "project": {
        "id": 3,
        "url": "http://patches.dpdk.org/api/projects/3/?format=api",
        "name": "DTS",
        "link_name": "dts",
        "list_id": "dts.dpdk.org",
        "list_email": "dts@dpdk.org",
        "web_url": "",
        "scm_url": "git://dpdk.org/tools/dts",
        "webscm_url": "http://git.dpdk.org/tools/dts/",
        "list_archive_url": "https://inbox.dpdk.org/dts",
        "list_archive_url_format": "https://inbox.dpdk.org/dts/{}",
        "commit_url_format": ""
    },
    "msgid": "<1599764335-1164-2-git-send-email-lihongx.ma@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dts/1599764335-1164-2-git-send-email-lihongx.ma@intel.com",
    "date": "2020-09-10T18:58:54",
    "name": "[V1,1/2] framework: use the default python lib threadpool to maintain thread",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": false,
    "hash": "56bf9f2e1847a7cfaab65feb725a4c9547e7ee66",
    "submitter": {
        "id": 1641,
        "url": "http://patches.dpdk.org/api/people/1641/?format=api",
        "name": "Ma, LihongX",
        "email": "lihongx.ma@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dts/patch/1599764335-1164-2-git-send-email-lihongx.ma@intel.com/mbox/",
    "series": [
        {
            "id": 12121,
            "url": "http://patches.dpdk.org/api/series/12121/?format=api",
            "web_url": "http://patches.dpdk.org/project/dts/list/?series=12121",
            "date": "2020-09-10T18:58:53",
            "name": "use the default python lib threadpool to maintain the pool of worker threads",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/12121/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/77302/comments/",
    "check": "pending",
    "checks": "http://patches.dpdk.org/api/patches/77302/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dts-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id E154FA04B5;\n\tFri, 11 Sep 2020 04:32:24 +0200 (CEST)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id CFE8F1C0CA;\n\tFri, 11 Sep 2020 04:32:24 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by dpdk.org (Postfix) with ESMTP id AAA47DE0\n for <dts@dpdk.org>; Fri, 11 Sep 2020 04:32:22 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 10 Sep 2020 19:32:22 -0700",
            "from dpdk-lihong-ub1604.sh.intel.com ([10.67.118.174])\n by fmsmga004.fm.intel.com with ESMTP; 10 Sep 2020 19:32:20 -0700"
        ],
        "IronPort-SDR": [
            "\n JSTWF0NoZKgUNn3onsjmE5PlHLSmBWtreFJGWVvJQHd9Rg8cA7PNkHbKSevuGGpQ35NwaOJviU\n SnyTlQqcrtRQ==",
            "\n ES5u+ZM/tsDC7CN8QZ5UWD6+2UsUG1dEPw4mF8fts43IycnS8sHWdEtz2msFVDdL0QwV19MATO\n 867G6HRjBD5g=="
        ],
        "X-IronPort-AV": [
            "E=McAfee;i=\"6000,8403,9740\"; a=\"158714509\"",
            "E=Sophos;i=\"5.76,413,1592895600\"; d=\"scan'208\";a=\"158714509\"",
            "E=Sophos;i=\"5.76,413,1592895600\"; d=\"scan'208\";a=\"329592596\""
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "From": "LihongX Ma <lihongx.ma@intel.com>",
        "To": "dts@dpdk.org",
        "Cc": "LihongX Ma <lihongx.ma@intel.com>",
        "Date": "Fri, 11 Sep 2020 02:58:54 +0800",
        "Message-Id": "<1599764335-1164-2-git-send-email-lihongx.ma@intel.com>",
        "X-Mailer": "git-send-email 2.7.4",
        "In-Reply-To": "<1599764335-1164-1-git-send-email-lihongx.ma@intel.com>",
        "References": "<1599764335-1164-1-git-send-email-lihongx.ma@intel.com>",
        "Subject": "[dts] [PATCH V1 1/2] framework: use the default python lib\n\tthreadpool to maintain thread",
        "X-BeenThere": "dts@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "test suite reviews and discussions <dts.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dts>,\n <mailto:dts-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dts/>",
        "List-Post": "<mailto:dts@dpdk.org>",
        "List-Help": "<mailto:dts-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dts>,\n <mailto:dts-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dts-bounces@dpdk.org",
        "Sender": "\"dts\" <dts-bounces@dpdk.org>"
    },
    "content": "the function in extra_libs/threadpool.py is similar to default python lib,\nso use the default lib instead of it.\n\nSigned-off-by: LihongX Ma <lihongx.ma@intel.com>\n---\n extra_libs/threadpool.py | 426 -----------------------------------------------\n framework/dts.py         |   5 -\n 2 files changed, 431 deletions(-)\n delete mode 100644 extra_libs/threadpool.py",
    "diff": "diff --git a/extra_libs/threadpool.py b/extra_libs/threadpool.py\ndeleted file mode 100644\nindex 3839f26..0000000\n--- a/extra_libs/threadpool.py\n+++ /dev/null\n@@ -1,426 +0,0 @@\n-# -*- coding: UTF-8 -*-\n-\"\"\"Easy to use object-oriented thread pool framework.\n-\n-A thread pool is an object that maintains a pool of worker threads to perform\n-time consuming operations in parallel. It assigns jobs to the threads\n-by putting them in a work request queue, where they are picked up by the\n-next available thread. This then performs the requested operation in the\n-background and puts the results in another queue.\n-\n-The thread pool object can then collect the results from all threads from\n-this queue as soon as they become available or after all threads have\n-finished their work. It's also possible, to define callbacks to handle\n-each result as it comes in.\n-\n-The basic concept and some code was taken from the book \"Python in a Nutshell,\n-2nd edition\" by Alex Martelli, O'Reilly 2006, ISBN 0-596-10046-9, from section\n-14.5 \"Threaded Program Architecture\". I wrapped the main program logic in the\n-ThreadPool class, added the WorkRequest class and the callback system and\n-tweaked the code here and there. Kudos also to Florent Aide for the exception\n-handling mechanism.\n-\n-Basic usage::\n-\n-    >>> pool = ThreadPool(poolsize)\n-    >>> requests = makeRequests(some_callable, list_of_args, callback)\n-    >>> [pool.putRequest(req) for req in requests]\n-    >>> pool.wait()\n-\n-See the end of the module code for a brief, annotated usage example.\n-\n-Website : http://chrisarndt.de/projects/threadpool/\n-\n-\"\"\"\n-__docformat__ = \"restructuredtext en\"\n-\n-__all__ = [\n-    'makeRequests',\n-    'NoResultsPending',\n-    'NoWorkersAvailable',\n-    'ThreadPool',\n-    'WorkRequest',\n-    'WorkerThread'\n-]\n-\n-__author__ = \"Christopher Arndt\"\n-__version__ = '1.3.2'\n-__license__ = \"MIT license\"\n-\n-\n-# standard library modules\n-import sys\n-import threading\n-import traceback\n-\n-try:\n-    import Queue            # Python 2\n-except ImportError:\n-    import queue as Queue   # Python 3\n-\n-\n-# exceptions\n-class NoResultsPending(Exception):\n-    \"\"\"All work requests have been processed.\"\"\"\n-    pass\n-\n-class NoWorkersAvailable(Exception):\n-    \"\"\"No worker threads available to process remaining requests.\"\"\"\n-    pass\n-\n-\n-# internal module helper functions\n-def _handle_thread_exception(request, exc_info):\n-    \"\"\"Default exception handler callback function.\n-\n-    This just prints the exception info via ``traceback.print_exception``.\n-\n-    \"\"\"\n-    traceback.print_exception(*exc_info)\n-\n-\n-# utility functions\n-def makeRequests(callable_, args_list, callback=None,\n-        exc_callback=_handle_thread_exception):\n-    \"\"\"Create several work requests for same callable with different arguments.\n-\n-    Convenience function for creating several work requests for the same\n-    callable where each invocation of the callable receives different values\n-    for its arguments.\n-\n-    ``args_list`` contains the parameters for each invocation of callable.\n-    Each item in ``args_list`` should be either a 2-item tuple of the list of\n-    positional arguments and a dictionary of keyword arguments or a single,\n-    non-tuple argument.\n-\n-    See docstring for ``WorkRequest`` for info on ``callback`` and\n-    ``exc_callback``.\n-\n-    \"\"\"\n-    requests = []\n-    for item in args_list:\n-        if 
isinstance(item, tuple):\n-            requests.append(\n-                WorkRequest(callable_, item[0], item[1], callback=callback,\n-                    exc_callback=exc_callback)\n-            )\n-        else:\n-            requests.append(\n-                WorkRequest(callable_, [item], None, callback=callback,\n-                    exc_callback=exc_callback)\n-            )\n-    return requests\n-\n-\n-# classes\n-class WorkerThread(threading.Thread):\n-    \"\"\"Background thread connected to the requests/results queues.\n-\n-    A worker thread sits in the background and picks up work requests from\n-    one queue and puts the results in another until it is dismissed.\n-\n-    \"\"\"\n-\n-    def __init__(self, requests_queue, results_queue, poll_timeout=5, **kwds):\n-        \"\"\"Set up thread in daemonic mode and start it immediatedly.\n-\n-        ``requests_queue`` and ``results_queue`` are instances of\n-        ``Queue.Queue`` passed by the ``ThreadPool`` class when it creates a\n-        new worker thread.\n-\n-        \"\"\"\n-        threading.Thread.__init__(self, **kwds)\n-        self.setDaemon(1)\n-        self._requests_queue = requests_queue\n-        self._results_queue = results_queue\n-        self._poll_timeout = poll_timeout\n-        self._dismissed = threading.Event()\n-        self.start()\n-\n-    def run(self):\n-        \"\"\"Repeatedly process the job queue until told to exit.\"\"\"\n-        while True:\n-            if self._dismissed.isSet():\n-                # we are dismissed, break out of loop\n-                break\n-            # get next work request. If we don't get a new request from the\n-            # queue after self._poll_timout seconds, we jump to the start of\n-            # the while loop again, to give the thread a chance to exit.\n-            try:\n-                request = self._requests_queue.get(True, self._poll_timeout)\n-            except Queue.Empty:\n-                continue\n-            else:\n-                if self._dismissed.isSet():\n-                    # we are dismissed, put back request in queue and exit loop\n-                    self._requests_queue.put(request)\n-                    break\n-                try:\n-                    result = request.callable(*request.args, **request.kwds)\n-                    self._results_queue.put((request, result))\n-                except:\n-                    request.exception = True\n-                    self._results_queue.put((request, sys.exc_info()))\n-\n-    def dismiss(self):\n-        \"\"\"Sets a flag to tell the thread to exit when done with current job.\n-        \"\"\"\n-        self._dismissed.set()\n-\n-\n-class WorkRequest:\n-    \"\"\"A request to execute a callable for putting in the request queue later.\n-\n-    See the module function ``makeRequests`` for the common case\n-    where you want to build several ``WorkRequest`` objects for the same\n-    callable but with different arguments for each call.\n-\n-    \"\"\"\n-\n-    def __init__(self, callable_, args=None, kwds=None, requestID=None,\n-            callback=None, exc_callback=_handle_thread_exception):\n-        \"\"\"Create a work request for a callable and attach callbacks.\n-\n-        A work request consists of the a callable to be executed by a\n-        worker thread, a list of positional arguments, a dictionary\n-        of keyword arguments.\n-\n-        A ``callback`` function can be specified, that is called when the\n-        results of the request are picked up from the 
result queue. It must\n-        accept two anonymous arguments, the ``WorkRequest`` object and the\n-        results of the callable, in that order. If you want to pass additional\n-        information to the callback, just stick it on the request object.\n-\n-        You can also give custom callback for when an exception occurs with\n-        the ``exc_callback`` keyword parameter. It should also accept two\n-        anonymous arguments, the ``WorkRequest`` and a tuple with the exception\n-        details as returned by ``sys.exc_info()``. The default implementation\n-        of this callback just prints the exception info via\n-        ``traceback.print_exception``. If you want no exception handler\n-        callback, just pass in ``None``.\n-\n-        ``requestID``, if given, must be hashable since it is used by\n-        ``ThreadPool`` object to store the results of that work request in a\n-        dictionary. It defaults to the return value of ``id(self)``.\n-\n-        \"\"\"\n-        if requestID is None:\n-            self.requestID = id(self)\n-        else:\n-            try:\n-                self.requestID = hash(requestID)\n-            except TypeError:\n-                raise TypeError(\"requestID must be hashable.\")\n-        self.exception = False\n-        self.callback = callback\n-        self.exc_callback = exc_callback\n-        self.callable = callable_\n-        self.args = args or []\n-        self.kwds = kwds or {}\n-\n-    def __str__(self):\n-        return \"<WorkRequest id=%s args=%r kwargs=%r exception=%s>\" % \\\n-            (self.requestID, self.args, self.kwds, self.exception)\n-\n-class ThreadPool:\n-    \"\"\"A thread pool, distributing work requests and collecting results.\n-\n-    See the module docstring for more information.\n-\n-    \"\"\"\n-\n-    def __init__(self, num_workers, q_size=0, resq_size=0, poll_timeout=5):\n-        \"\"\"Set up the thread pool and start num_workers worker threads.\n-\n-        ``num_workers`` is the number of worker threads to start initially.\n-\n-        If ``q_size > 0`` the size of the work *request queue* is limited and\n-        the thread pool blocks when the queue is full and it tries to put\n-        more work requests in it (see ``putRequest`` method), unless you also\n-        use a positive ``timeout`` value for ``putRequest``.\n-\n-        If ``resq_size > 0`` the size of the *results queue* is limited and the\n-        worker threads will block when the queue is full and they try to put\n-        new results in it.\n-\n-        .. 
warning:\n-            If you set both ``q_size`` and ``resq_size`` to ``!= 0`` there is\n-            the possibilty of a deadlock, when the results queue is not pulled\n-            regularly and too many jobs are put in the work requests queue.\n-            To prevent this, always set ``timeout > 0`` when calling\n-            ``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions.\n-\n-        \"\"\"\n-        self._requests_queue = Queue.Queue(q_size)\n-        self._results_queue = Queue.Queue(resq_size)\n-        self.workers = []\n-        self.dismissedWorkers = []\n-        self.workRequests = {}\n-        self.createWorkers(num_workers, poll_timeout)\n-\n-    def createWorkers(self, num_workers, poll_timeout=5):\n-        \"\"\"Add num_workers worker threads to the pool.\n-\n-        ``poll_timout`` sets the interval in seconds (int or float) for how\n-        ofte threads should check whether they are dismissed, while waiting for\n-        requests.\n-\n-        \"\"\"\n-        for i in range(num_workers):\n-            self.workers.append(WorkerThread(self._requests_queue,\n-                self._results_queue, poll_timeout=poll_timeout))\n-\n-    def dismissWorkers(self, num_workers, do_join=False):\n-        \"\"\"Tell num_workers worker threads to quit after their current task.\"\"\"\n-        dismiss_list = []\n-        for i in range(min(num_workers, len(self.workers))):\n-            worker = self.workers.pop()\n-            worker.dismiss()\n-            dismiss_list.append(worker)\n-\n-        if do_join:\n-            for worker in dismiss_list:\n-                worker.join()\n-        else:\n-            self.dismissedWorkers.extend(dismiss_list)\n-\n-    def joinAllDismissedWorkers(self):\n-        \"\"\"Perform Thread.join() on all worker threads that have been dismissed.\n-        \"\"\"\n-        for worker in self.dismissedWorkers:\n-            worker.join()\n-        self.dismissedWorkers = []\n-\n-    def putRequest(self, request, block=True, timeout=None):\n-        \"\"\"Put work request into work queue and save its id for later.\"\"\"\n-        assert isinstance(request, WorkRequest)\n-        # don't reuse old work requests\n-        assert not getattr(request, 'exception', None)\n-        self._requests_queue.put(request, block, timeout)\n-        self.workRequests[request.requestID] = request\n-\n-    def poll(self, block=False):\n-        \"\"\"Process any new results in the queue.\"\"\"\n-        while True:\n-            # still results pending?\n-            if not self.workRequests:\n-                raise NoResultsPending\n-            # are there still workers to process remaining requests?\n-            elif block and not self.workers:\n-                raise NoWorkersAvailable\n-            try:\n-                # get back next results\n-                request, result = self._results_queue.get(block=block)\n-                # has an exception occured?\n-                if request.exception and request.exc_callback:\n-                    request.exc_callback(request, result)\n-                # hand results to callback, if any\n-                if request.callback and not \\\n-                       (request.exception and request.exc_callback):\n-                    request.callback(request, result)\n-                del self.workRequests[request.requestID]\n-            except Queue.Empty:\n-                break\n-            except Exception as e:\n-                traceback.print_exception(*sys.exc_info())\n-                # 
unexpected thing happened, need further dedbugging\n-                import pdb\n-                pdb.set_trace()\n-\n-    def wait(self):\n-        \"\"\"Wait for results, blocking until all have arrived.\"\"\"\n-        while 1:\n-            try:\n-                self.poll(True)\n-            except NoResultsPending:\n-                break\n-\n-\n-################\n-# USAGE EXAMPLE\n-################\n-\n-if __name__ == '__main__':\n-    import random\n-    import time\n-\n-    # the work the threads will have to do (rather trivial in our example)\n-    def do_something(data):\n-        time.sleep(random.randint(1,5))\n-        result = round(random.random() * data, 5)\n-        # just to show off, we throw an exception once in a while\n-        if result > 5:\n-            raise RuntimeError(\"Something extraordinary happened!\")\n-        return result\n-\n-    # this will be called each time a result is available\n-    def print_result(request, result):\n-        print(\"**** Result from request #%s: %r\" % (request.requestID, result))\n-\n-    # this will be called when an exception occurs within a thread\n-    # this example exception handler does little more than the default handler\n-    def handle_exception(request, exc_info):\n-        if not isinstance(exc_info, tuple):\n-            # Something is seriously wrong...\n-            print(request)\n-            print(exc_info)\n-            raise SystemExit\n-        print(\"**** Exception occured in request #%s: %s\" % \\\n-          (request.requestID, exc_info))\n-\n-    # assemble the arguments for each job to a list...\n-    data = [random.randint(1,10) for i in range(20)]\n-    # ... and build a WorkRequest object for each item in data\n-    requests = makeRequests(do_something, data, print_result, handle_exception)\n-    # to use the default exception handler, uncomment next line and comment out\n-    # the preceding one.\n-    #requests = makeRequests(do_something, data, print_result)\n-\n-    # or the other form of args_lists accepted by makeRequests: ((,), {})\n-    data = [((random.randint(1,10),), {}) for i in range(20)]\n-    requests.extend(\n-        makeRequests(do_something, data, print_result, handle_exception)\n-        #makeRequests(do_something, data, print_result)\n-        # to use the default exception handler, uncomment next line and comment\n-        # out the preceding one.\n-    )\n-\n-    # we create a pool of 3 worker threads\n-    print(\"Creating thread pool with 3 worker threads.\")\n-    main = ThreadPool(3)\n-\n-    # then we put the work requests in the queue...\n-    for req in requests:\n-        main.putRequest(req)\n-        print(\"Work request #%s added.\" % req.requestID)\n-    # or shorter:\n-    # [main.putRequest(req) for req in requests]\n-\n-    # ...and wait for the results to arrive in the result queue\n-    # by using ThreadPool.wait(). 
This would block until results for\n-    # all work requests have arrived:\n-    # main.wait()\n-\n-    # instead we can poll for results while doing something else:\n-    i = 0\n-    while True:\n-        try:\n-            time.sleep(0.5)\n-            main.poll()\n-            print(\"Main thread working...\")\n-            print(\"(active worker threads: %i)\" % (threading.activeCount()-1, ))\n-            if i == 10:\n-                print(\"**** Adding 3 more worker threads...\")\n-                main.createWorkers(3)\n-            if i == 20:\n-                print(\"**** Dismissing 2 worker threads...\")\n-                main.dismissWorkers(2)\n-            i += 1\n-        except KeyboardInterrupt:\n-            print(\"**** Interrupted!\")\n-            break\n-        except NoResultsPending:\n-            print(\"**** No pending results.\")\n-            break\n-    if main.dismissedWorkers:\n-        print(\"Joining all dismissed worker threads...\")\n-        main.joinAllDismissedWorkers()\ndiff --git a/framework/dts.py b/framework/dts.py\nindex 2d86bc3..1a08d6d 100644\n--- a/framework/dts.py\n+++ b/framework/dts.py\n@@ -514,11 +514,6 @@ def run_all(config_file, pkgName, git, patch, skip_setup,\n     if not os.path.exists(output_dir):\n         os.mkdir(output_dir)\n \n-    # add external library\n-    exec_file = os.path.realpath(__file__)\n-    extra_libs_path = exec_file.replace('framework/dts.py', '') + 'extra_libs'\n-    sys.path.insert(1, extra_libs_path)\n-\n     # add python module search path\n     sys.path.append(suite_dir)\n \n",
    "prefixes": [
        "V1",
        "1/2"
    ]
}
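
Updating a patch (for example, changing its state or archiving it) uses the same endpoint with PUT or PATCH and requires authentication on the instance. The sketch below is a hedged example, not a verbatim workflow: it assumes Patchwork token authentication is enabled, uses a placeholder token, and only touches the "state" and "archived" fields shown in the response above.

import requests  # third-party HTTP client, assumed available

# Placeholder token; Patchwork instances typically issue per-user API tokens
# for authenticated writes, and only maintainers may change patch state.
headers = {"Authorization": "Token <your-api-token>"}

# PATCH sends a partial update: only the listed fields are modified.
resp = requests.patch(
    "http://patches.dpdk.org/api/patches/77302/",
    headers=headers,
    json={"state": "accepted", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])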