get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied in the request body are changed).

put:
Update a patch (full update of its writable fields).
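
Reads are anonymous; updates over PUT or PATCH require an authenticated account with sufficient rights on the project. Below is a minimal sketch in Python using the requests library. It assumes token authentication ("Authorization: Token ..."); the token string and the field values being written are placeholders for illustration, not taken from this response.

    import requests

    BASE = "http://patches.dpdk.org/api"
    PATCH_ID = 85255

    # Reads are anonymous: fetch the patch record shown below.
    resp = requests.get(f"{BASE}/patches/{PATCH_ID}/")
    resp.raise_for_status()
    patch = resp.json()
    print(patch["name"], patch["state"])

    # Updates need an API token for an account allowed to modify the patch;
    # the token value here is a placeholder.
    headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef01234567"}

    # PATCH is a partial update: only the fields named in the body change.
    resp = requests.patch(
        f"{BASE}/patches/{PATCH_ID}/",
        headers=headers,
        json={"state": "superseded", "archived": True},
    )
    resp.raise_for_status()
    print(resp.json()["state"])
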

GET /api/patches/85255/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 85255,
    "url": "http://patches.dpdk.org/api/patches/85255/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20201216164931.1517-5-ophirmu@nvidia.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20201216164931.1517-5-ophirmu@nvidia.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20201216164931.1517-5-ophirmu@nvidia.com",
    "date": "2020-12-16T16:49:29",
    "name": "[v1,4/6] app/regex: support multi cores",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "2659c54b46732bb2a90f55ee9e3f4504b6f5adfc",
    "submitter": {
        "id": 1908,
        "url": "http://patches.dpdk.org/api/people/1908/?format=api",
        "name": "Ophir Munk",
        "email": "ophirmu@nvidia.com"
    },
    "delegate": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/users/1/?format=api",
        "username": "tmonjalo",
        "first_name": "Thomas",
        "last_name": "Monjalon",
        "email": "thomas@monjalon.net"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20201216164931.1517-5-ophirmu@nvidia.com/mbox/",
    "series": [
        {
            "id": 14334,
            "url": "http://patches.dpdk.org/api/series/14334/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=14334",
            "date": "2020-12-16T16:49:28",
            "name": "regex multi Q with multi cores support",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/14334/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/85255/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/85255/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from dpdk.org (dpdk.org [92.243.14.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id A6098A09F0;\n\tWed, 16 Dec 2020 17:51:00 +0100 (CET)",
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id F1361C9F4;\n\tWed, 16 Dec 2020 17:50:08 +0100 (CET)",
            "from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129])\n by dpdk.org (Postfix) with ESMTP id B0206C9D6\n for <dev@dpdk.org>; Wed, 16 Dec 2020 17:50:04 +0100 (CET)",
            "from Internal Mail-Server by MTLPINE1 (envelope-from\n ophirmu@nvidia.com) with SMTP; 16 Dec 2020 18:49:58 +0200",
            "from nvidia.com (pegasus05.mtr.labs.mlnx [10.210.16.100])\n by labmailer.mlnx (8.13.8/8.13.8) with ESMTP id 0BGGnvcv005924;\n Wed, 16 Dec 2020 18:49:57 +0200"
        ],
        "From": "Ophir Munk <ophirmu@nvidia.com>",
        "To": "Ori Kam <orika@nvidia.com>, dev@dpdk.org,\n Raslan Darawsheh <rasland@nvidia.com>",
        "Cc": "Ophir Munk <ophirmu@nvidia.com>, Thomas Monjalon <thomas@monjalon.net>",
        "Date": "Wed, 16 Dec 2020 16:49:29 +0000",
        "Message-Id": "<20201216164931.1517-5-ophirmu@nvidia.com>",
        "X-Mailer": "git-send-email 2.8.4",
        "In-Reply-To": "<20201216164931.1517-1-ophirmu@nvidia.com>",
        "References": "<20201216164931.1517-1-ophirmu@nvidia.com>",
        "Subject": "[dpdk-dev] [PATCH v1 4/6] app/regex: support multi cores",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "Up to this commit the regex application was running with multiple QPs on\na single core.  This commit adds the option to specify a number of cores\non which multiple QPs will run.\nA new parameter 'nb_lcores' was added to configure the number of cores:\n--nb_lcores <num of cores>.\nIf not configured the number of cores is set to 1 by default.  On\napplication startup a few initial steps occur by the main core: the\nnumber of QPs and cores are parsed.  The QPs are distributed as evenly\nas possible on the cores.  The regex device and all QPs are initialized.\nThe data file is read and saved in a buffer. Then for each core the\napplication calls rte_eal_remote_launch() with the worker routine\n(run_regex) as its parameter.\n\nSigned-off-by: Ophir Munk <ophirmu@nvidia.com>\n---\n app/test-regex/main.c | 155 ++++++++++++++++++++++++++++++++++++++++++++------\n 1 file changed, 139 insertions(+), 16 deletions(-)",
    "diff": "diff --git a/app/test-regex/main.c b/app/test-regex/main.c\nindex 9bafd02..720eb1c 100644\n--- a/app/test-regex/main.c\n+++ b/app/test-regex/main.c\n@@ -34,6 +34,7 @@ enum app_args {\n \tARG_PERF_MODE,\n \tARG_NUM_OF_ITERATIONS,\n \tARG_NUM_OF_QPS,\n+\tARG_NUM_OF_LCORES,\n };\n \n struct job_ctx {\n@@ -49,6 +50,26 @@ struct qp_params {\n \tchar *buf;\n };\n \n+struct qps_per_lcore {\n+\tunsigned int lcore_id;\n+\tint socket;\n+\tuint16_t qp_id_base;\n+\tuint16_t nb_qps;\n+};\n+\n+struct regex_conf {\n+\tuint32_t nb_jobs;\n+\tbool perf_mode;\n+\tuint32_t nb_iterations;\n+\tchar *data_file;\n+\tuint8_t nb_max_matches;\n+\tuint32_t nb_qps;\n+\tuint16_t qp_id_base;\n+\tchar *data_buf;\n+\tlong data_len;\n+\tlong job_len;\n+};\n+\n static void\n usage(const char *prog_name)\n {\n@@ -58,14 +79,15 @@ usage(const char *prog_name)\n \t\t\" --nb_jobs: number of jobs to use\\n\"\n \t\t\" --perf N: only outputs the performance data\\n\"\n \t\t\" --nb_iter N: number of iteration to run\\n\"\n-\t\t\" --nb_qps N: number of queues to use\\n\",\n+\t\t\" --nb_qps N: number of queues to use\\n\"\n+\t\t\" --nb_lcores N: number of lcores to use\\n\",\n \t\tprog_name);\n }\n \n static void\n args_parse(int argc, char **argv, char *rules_file, char *data_file,\n \t   uint32_t *nb_jobs, bool *perf_mode, uint32_t *nb_iterations,\n-\t   uint32_t *nb_qps)\n+\t   uint32_t *nb_qps, uint32_t *nb_lcores)\n {\n \tchar **argvopt;\n \tint opt;\n@@ -85,6 +107,8 @@ args_parse(int argc, char **argv, char *rules_file, char *data_file,\n \t\t{ \"nb_iter\", 1, 0, ARG_NUM_OF_ITERATIONS},\n \t\t/* Number of QPs. */\n \t\t{ \"nb_qps\", 1, 0, ARG_NUM_OF_QPS},\n+\t\t/* Number of lcores. */\n+\t\t{ \"nb_lcores\", 1, 0, ARG_NUM_OF_LCORES},\n \t\t/* End of options */\n \t\t{ 0, 0, 0, 0 }\n \t};\n@@ -121,6 +145,9 @@ args_parse(int argc, char **argv, char *rules_file, char *data_file,\n \t\tcase ARG_NUM_OF_QPS:\n \t\t\t*nb_qps = atoi(optarg);\n \t\t\tbreak;\n+\t\tcase ARG_NUM_OF_LCORES:\n+\t\t\t*nb_lcores = atoi(optarg);\n+\t\t\tbreak;\n \t\tcase ARG_HELP:\n \t\t\tusage(\"RegEx test app\");\n \t\t\tbreak;\n@@ -274,11 +301,18 @@ extbuf_free_cb(void *addr __rte_unused, void *fcb_opaque __rte_unused)\n }\n \n static int\n-run_regex(uint32_t nb_jobs,\n-\t  bool perf_mode, uint32_t nb_iterations,\n-\t  uint8_t nb_max_matches, uint32_t nb_qps,\n-\t  char *data_buf, long data_len, long job_len)\n+run_regex(void *args)\n {\n+\tstruct regex_conf *rgxc = args;\n+\tuint32_t nb_jobs = rgxc->nb_jobs;\n+\tuint32_t nb_iterations = rgxc->nb_iterations;\n+\tuint8_t nb_max_matches = rgxc->nb_max_matches;\n+\tuint32_t nb_qps = rgxc->nb_qps;\n+\tuint16_t qp_id_base  = rgxc->qp_id_base;\n+\tchar *data_buf = rgxc->data_buf;\n+\tlong data_len = rgxc->data_len;\n+\tlong job_len = rgxc->job_len;\n+\n \tchar *buf = NULL;\n \tuint32_t actual_jobs = 0;\n \tuint32_t i;\n@@ -298,9 +332,13 @@ run_regex(uint32_t nb_jobs,\n \tstruct qp_params *qps = NULL;\n \tbool update;\n \tuint16_t qps_used = 0;\n+\tchar mbuf_pool[16];\n \n \tshinfo.free_cb = extbuf_free_cb;\n-\tmbuf_mp = rte_pktmbuf_pool_create(\"mbuf_pool\", nb_jobs * nb_qps, 0,\n+\tsnprintf(mbuf_pool,\n+\t\t sizeof(mbuf_pool),\n+\t\t \"mbuf_pool_%2u\", qp_id_base);\n+\tmbuf_mp = rte_pktmbuf_pool_create(mbuf_pool, nb_jobs * nb_qps, 0,\n \t\t\t0, MBUF_SIZE, rte_socket_id());\n \tif (mbuf_mp == NULL) {\n \t\tprintf(\"Error, can't create memory pool\\n\");\n@@ -402,7 +440,7 @@ run_regex(uint32_t nb_jobs,\n \t\t\t\t\t\tqp->total_enqueue +=\n \t\t\t\t\t\trte_regexdev_enqueue_burst\n 
\t\t\t\t\t\t\t(dev_id,\n-\t\t\t\t\t\t\tqp_id,\n+\t\t\t\t\t\t\tqp_id_base + qp_id,\n \t\t\t\t\t\t\tcur_ops_to_enqueue,\n \t\t\t\t\t\t\tactual_jobs -\n \t\t\t\t\t\t\tqp->total_enqueue);\n@@ -418,7 +456,7 @@ run_regex(uint32_t nb_jobs,\n \t\t\t\t\tqp->total_dequeue +=\n \t\t\t\t\t\trte_regexdev_dequeue_burst\n \t\t\t\t\t\t\t(dev_id,\n-\t\t\t\t\t\t\tqp_id,\n+\t\t\t\t\t\t\tqp_id_base + qp_id,\n \t\t\t\t\t\t\tcur_ops_to_dequeue,\n \t\t\t\t\t\t\tqp->total_enqueue -\n \t\t\t\t\t\t\tqp->total_dequeue);\n@@ -435,7 +473,7 @@ run_regex(uint32_t nb_jobs,\n \t       (((double)actual_jobs * job_len * nb_iterations * 8) / time) /\n \t\t1000000000.0);\n \n-\tif (perf_mode)\n+\tif (rgxc->perf_mode)\n \t\tgoto end;\n \tfor (qp_id = 0; qp_id < nb_qps; qp_id++) {\n \t\tprintf(\"\\n############ QP id=%u ############\\n\", qp_id);\n@@ -491,6 +529,67 @@ run_regex(uint32_t nb_jobs,\n \treturn res;\n }\n \n+static int\n+distribute_qps_to_lcores(uint32_t nb_cores, uint32_t nb_qps,\n+\t\t\t struct qps_per_lcore **qpl)\n+{\n+\tint socket;\n+\tunsigned lcore_id;\n+\tuint32_t i;\n+\tuint16_t min_qp_id;\n+\tuint16_t max_qp_id;\n+\tstruct qps_per_lcore *qps_per_lcore;\n+\tuint32_t detected_lcores;\n+\n+\tif (nb_qps < nb_cores) {\n+\t\tnb_cores = nb_qps;\n+\t\tprintf(\"Reducing number of cores to number of QPs (%u)\\n\",\n+\t\t       nb_cores);\n+\t}\n+\t/* Allocate qps_per_lcore array */\n+\tqps_per_lcore =\n+\t\trte_malloc(NULL, sizeof(*qps_per_lcore) * nb_cores, 0);\n+\tif (!qps_per_lcore)\n+\t\trte_exit(EXIT_FAILURE, \"Falied to create qps_per_lcore array\\n\");\n+\t*qpl = qps_per_lcore;\n+\tdetected_lcores = 0;\n+\tmin_qp_id = 0;\n+\n+\tRTE_LCORE_FOREACH_WORKER(lcore_id) {\n+\t\tif (detected_lcores >= nb_cores)\n+\t\t\tbreak;\n+\t\tqps_per_lcore[detected_lcores].lcore_id = lcore_id;\n+\t\tsocket = rte_lcore_to_socket_id(lcore_id);\n+\t\tif (socket == SOCKET_ID_ANY)\n+\t\t\tsocket = 0;\n+\t\tqps_per_lcore[detected_lcores].socket = socket;\n+\t\tqps_per_lcore[detected_lcores].qp_id_base = min_qp_id;\n+\t\tmax_qp_id = min_qp_id + nb_qps / nb_cores - 1;\n+\t\tif (nb_qps % nb_cores > detected_lcores)\n+\t\t\tmax_qp_id++;\n+\t\tqps_per_lcore[detected_lcores].nb_qps = max_qp_id -\n+\t\t\t\t\t\t\tmin_qp_id + 1;\n+\t\tmin_qp_id = max_qp_id + 1;\n+\t\tdetected_lcores++;\n+\t}\n+\tif (detected_lcores != nb_cores)\n+\t\treturn -1;\n+\n+\tfor (i = 0; i < detected_lcores; i++) {\n+\t\tprintf(\"===> Core %d: allocated queues: \",\n+\t\t       qps_per_lcore[i].lcore_id);\n+\t\tmin_qp_id = qps_per_lcore[i].qp_id_base;\n+\t\tmax_qp_id =\n+\t\t\tqps_per_lcore[i].qp_id_base + qps_per_lcore[i].nb_qps;\n+\t\twhile (min_qp_id < max_qp_id) {\n+\t\t\tprintf(\"%u \", min_qp_id);\n+\t\t\tmin_qp_id++;\n+\t\t}\n+\t\tprintf(\"\\n\");\n+\t}\n+\treturn 0;\n+}\n+\n int\n main(int argc, char **argv)\n {\n@@ -506,6 +605,10 @@ main(int argc, char **argv)\n \tchar *data_buf;\n \tlong data_len;\n \tlong job_len;\n+\tuint32_t nb_lcores = 1;\n+\tstruct regex_conf *rgxc;\n+\tuint32_t i;\n+\tstruct qps_per_lcore *qps_per_lcore;\n \n \t/* Init EAL. 
*/\n \tret = rte_eal_init(argc, argv);\n@@ -515,10 +618,15 @@ main(int argc, char **argv)\n \targv += ret;\n \tif (argc > 1)\n \t\targs_parse(argc, argv, rules_file, data_file, &nb_jobs,\n-\t\t\t\t&perf_mode, &nb_iterations, &nb_qps);\n+\t\t\t\t&perf_mode, &nb_iterations, &nb_qps,\n+\t\t\t\t&nb_lcores);\n \n \tif (nb_qps == 0)\n \t\trte_exit(EXIT_FAILURE, \"Number of QPs must be greater than 0\\n\");\n+\tif (nb_lcores == 0)\n+\t\trte_exit(EXIT_FAILURE, \"Number of lcores must be greater than 0\\n\");\n+\tif (distribute_qps_to_lcores(nb_lcores, nb_qps, &qps_per_lcore) < 0)\n+\t\trte_exit(EXIT_FAILURE, \"Failed to distribute queues to lcores!\\n\");\n \tret = init_port(&nb_max_payload, rules_file,\n \t\t\t&nb_max_matches, nb_qps);\n \tif (ret < 0)\n@@ -535,12 +643,27 @@ main(int argc, char **argv)\n \tif (job_len > nb_max_payload)\n \t\trte_exit(EXIT_FAILURE, \"Error, not enough jobs to cover input.\\n\");\n \n-\tret = run_regex(nb_jobs, perf_mode,\n-\t\t\tnb_iterations, nb_max_matches, nb_qps,\n-\t\t\tdata_buf, data_len, job_len);\n-\tif (ret < 0) {\n-\t\trte_exit(EXIT_FAILURE, \"RegEx function failed\\n\");\n+\trgxc = rte_malloc(NULL, sizeof(*rgxc) * nb_lcores, 0);\n+\tif (!rgxc)\n+\t\trte_exit(EXIT_FAILURE, \"Falied to create Regex Conf\\n\");\n+\tfor (i = 0; i < nb_lcores; i++) {\n+\t\trgxc[i] = (struct regex_conf){\n+\t\t\t.nb_jobs = nb_jobs,\n+\t\t\t.perf_mode = perf_mode,\n+\t\t\t.nb_iterations = nb_iterations,\n+\t\t\t.nb_max_matches = nb_max_matches,\n+\t\t\t.nb_qps = qps_per_lcore[i].nb_qps,\n+\t\t\t.qp_id_base = qps_per_lcore[i].qp_id_base,\n+\t\t\t.data_buf = data_buf,\n+\t\t\t.data_len = data_len,\n+\t\t\t.job_len = job_len,\n+\t\t};\n+\t\trte_eal_remote_launch(run_regex, &rgxc[i],\n+\t\t\t\t      qps_per_lcore[i].lcore_id);\n \t}\n+\trte_eal_mp_wait_lcore();\n \trte_free(data_buf);\n+\trte_free(rgxc);\n+\trte_free(qps_per_lcore);\n \treturn EXIT_SUCCESS;\n }\n",
    "prefixes": [
        "v1",
        "4/6"
    ]
}
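
The response links the patch to its related resources: "mbox" (the raw patch, suitable for git am), "comments", "checks" (the per-context results behind the aggregate "check": "warning"), and the containing "series". The sketch below, again Python with requests, follows those URLs; the output file name is illustrative, and the field names assume the standard Patchwork REST API schema.

    import requests

    # Re-fetch the patch record and follow the resource URLs it carries.
    patch = requests.get("http://patches.dpdk.org/api/patches/85255/").json()

    # "mbox" is the raw patch in mbox form, ready to be applied with `git am`.
    mbox = requests.get(patch["mbox"]).text
    with open("0004-app-regex-support-multi-cores.patch", "w") as f:
        f.write(mbox)

    # "check" above is the aggregate result ("warning" here); the per-context
    # results sit behind the "checks" URL.
    for check in requests.get(patch["checks"]).json():
        print(check["context"], check["state"], check["description"])

    # Follow-up discussion attached to this patch.
    for comment in requests.get(patch["comments"]).json():
        print(comment["submitter"]["name"], comment["date"])
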