get:
Show a patch.

patch:
Partially update a patch (only the provided fields are changed).

put:
Update a patch.

GET /api/patches/41451/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 41451,
    "url": "http://patches.dpdk.org/api/patches/41451/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20180625071745.16810-8-qi.z.zhang@intel.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20180625071745.16810-8-qi.z.zhang@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20180625071745.16810-8-qi.z.zhang@intel.com",
    "date": "2018-06-25T07:17:29",
    "name": "[v3,07/23] ethdev: support attach or detach share device from secondary",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "6281738daf34fed56b987cf4c0d4946784439b04",
    "submitter": {
        "id": 504,
        "url": "http://patches.dpdk.org/api/people/504/?format=api",
        "name": "Qi Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20180625071745.16810-8-qi.z.zhang@intel.com/mbox/",
    "series": [
        {
            "id": 221,
            "url": "http://patches.dpdk.org/api/series/221/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=221",
            "date": "2018-06-25T07:17:22",
            "name": "enable hotplug on multi-process",
            "version": 3,
            "mbox": "http://patches.dpdk.org/series/221/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/41451/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/41451/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id B2BE55F2E;\n\tMon, 25 Jun 2018 09:17:44 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n\tby dpdk.org (Postfix) with ESMTP id E72685F25\n\tfor <dev@dpdk.org>; Mon, 25 Jun 2018 09:17:41 +0200 (CEST)",
            "from fmsmga001.fm.intel.com ([10.253.24.23])\n\tby orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384;\n\t25 Jun 2018 00:17:40 -0700",
            "from dpdk51.sh.intel.com ([10.67.110.190])\n\tby fmsmga001.fm.intel.com with ESMTP; 25 Jun 2018 00:17:23 -0700"
        ],
        "X-Amp-Result": "SKIPPED(no attachment in message)",
        "X-Amp-File-Uploaded": "False",
        "X-ExtLoop1": "1",
        "X-IronPort-AV": "E=Sophos;i=\"5.51,269,1526367600\"; d=\"scan'208\";a=\"66973167\"",
        "From": "Qi Zhang <qi.z.zhang@intel.com>",
        "To": "thomas@monjalon.net,\n\tanatoly.burakov@intel.com",
        "Cc": "konstantin.ananyev@intel.com, dev@dpdk.org, bruce.richardson@intel.com, \n\tferruh.yigit@intel.com, benjamin.h.shelton@intel.com,\n\tnarender.vangati@intel.com, Qi Zhang <qi.z.zhang@intel.com>",
        "Date": "Mon, 25 Jun 2018 15:17:29 +0800",
        "Message-Id": "<20180625071745.16810-8-qi.z.zhang@intel.com>",
        "X-Mailer": "git-send-email 2.13.6",
        "In-Reply-To": "<20180625071745.16810-1-qi.z.zhang@intel.com>",
        "References": "<20180607123849.14439-1-qi.z.zhang@intel.com>\n\t<20180625071745.16810-1-qi.z.zhang@intel.com>",
        "Subject": "[dpdk-dev] [PATCH v3 07/23] ethdev: support attach or detach share\n\tdevice from secondary",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch cover the multi-process hotplug case when a share device\nattach/detach request be issued from secondary process, the implementation\nreferences malloc_mp.c.\n\ndevice attach on secondary:\na) seconary send asycn request to primary and wait on a condition\n   which will be released by matched response from primary.\nb) primary receive the request and attach the new device if failed\n   goto i).\nc) primary forward attach request to all secondary as async request\n   (because this in mp thread context, use sync request will deadlock)\nd) secondary receive request and attach device and send reply.\ne) primary check the reply if all success go to j).\nf) primary send attach rollback async request to all secondary.\ng) secondary receive the request and detach device and send reply.\nh) primary receive the reply and detach device as rollback action.\ni) send fail response to secondary, goto k).\nj) send success response to secondary.\nk) secondary process receive response and return.\n\ndevice detach on secondary:\na) secondary send async request to primary and wait on a condition\n   which will be released by matched response from primary.\nb) primary receive the request and  perform pre-detach check, if device\n   is locked, goto j).\nc) primary send pre-detach async request to all secondary.\nd) secondary perform pre-detach check and send reply.\ne) primary check the reply if any fail goto j).\nf) primary send detach async request to all secondary\ng) secondary detach the device and send reply\nh) primary detach the device.\ni) send success response to secondary, goto k).\nj) send fail response to secondary.\nk) secondary process receive response and return.\n\nSigned-off-by: Qi Zhang <qi.z.zhang@intel.com>\n---\n lib/librte_ethdev/ethdev_mp.c | 513 +++++++++++++++++++++++++++++++++++++++++-\n lib/librte_ethdev/ethdev_mp.h |   1 +\n 2 files changed, 504 insertions(+), 10 deletions(-)",
    "diff": "diff --git a/lib/librte_ethdev/ethdev_mp.c b/lib/librte_ethdev/ethdev_mp.c\nindex b00c05c23..af8cec8c0 100644\n--- a/lib/librte_ethdev/ethdev_mp.c\n+++ b/lib/librte_ethdev/ethdev_mp.c\n@@ -3,12 +3,103 @@\n  */\n \n #include <rte_string_fns.h>\n+#include <sys/time.h>\n+\n+#include <rte_alarm.h>\n+\n #include \"rte_ethdev_driver.h\"\n #include \"ethdev_mp.h\"\n #include \"ethdev_lock.h\"\n+#include \"ethdev_private.h\"\n+\n+/**\n+ * secondary to primary request.\n+ * start from function eth_dev_request_to_primary.\n+ *\n+ * device attach:\n+ * a) seconary send request to primary.\n+ * b) primary attach the new device if failed goto i).\n+ * c) primary forward attach request to all secondary.\n+ * d) secondary receive request and attach device and send reply.\n+ * e) primary check the reply if all success go to j).\n+ * f) primary send attach rollback request to all secondary.\n+ * g) secondary receive the request and detach device and send reply.\n+ * h) primary receive the reply and detach device as rollback action.\n+ * i) send fail response to secondary, goto k).\n+ * j) send success response to secondary.\n+ * k) end.\n+\n+ * device detach:\n+ * a) secondary send request to primary.\n+ * b) primary perform pre-detach check, if device is locked, got j).\n+ * c) primary send pre-detach check request to all secondary.\n+ * d) secondary perform pre-detach check and send reply.\n+ * e) primary check the reply if any fail goto j).\n+ * f) primary send detach request to all secondary\n+ * g) secondary detach the device and send reply\n+ * h) primary detach the device.\n+ * i) send success response to secondary, goto k).\n+ * j) send fail response to secondary.\n+ * k) end.\n+ */\n+\n+enum req_state {\n+\tREQ_STATE_INACTIVE = 0,\n+\tREQ_STATE_ACTIVE,\n+\tREQ_STATE_COMPLETE\n+};\n+\n+struct mp_request {\n+\tTAILQ_ENTRY(mp_request) next;\n+\tstruct eth_dev_mp_req user_req; /**< contents of request */\n+\tpthread_cond_t cond; /**< variable we use to time out on 
this request */\n+\tenum req_state state; /**< indicate status of this request */\n+};\n+\n+/*\n+ * We could've used just a single request, but it may be possible for\n+ * secondaries to timeout earlier than the primary, and send a new request while\n+ * primary is still expecting replies to the old one. Therefore, each new\n+ * request will get assigned a new ID, which is how we will distinguish between\n+ * expected and unexpected messages.\n+ */\n+TAILQ_HEAD(mp_request_list, mp_request);\n+static struct {\n+\tstruct mp_request_list list;\n+\tpthread_mutex_t lock;\n+} mp_request_list = {\n+\t.list = TAILQ_HEAD_INITIALIZER(mp_request_list.list),\n+\t.lock = PTHREAD_MUTEX_INITIALIZER\n+};\n \n #define MP_TIMEOUT_S 5 /**< 5 seconds timeouts */\n \n+static struct mp_request *\n+find_request_by_id(uint64_t id)\n+{\n+\tstruct mp_request *req;\n+\n+\tTAILQ_FOREACH(req, &mp_request_list.list, next) {\n+\t\tif (req->user_req.id == id)\n+\t\t\tbreak;\n+\t}\n+\treturn req;\n+}\n+\n+static uint64_t\n+get_unique_id(void)\n+{\n+\tuint64_t id;\n+\n+\tdo {\n+\t\tid = rte_rand();\n+\t} while (find_request_by_id(id) != NULL);\n+\treturn id;\n+}\n+\n+static int\n+send_request_to_secondary_async(const struct eth_dev_mp_req *req);\n+\n static int detach_on_secondary(uint16_t port_id)\n {\n \tstruct rte_device *dev;\n@@ -78,19 +169,355 @@ static int attach_on_secondary(const char *devargs, uint16_t port_id)\n }\n \n static int\n-handle_secondary_request(const struct rte_mp_msg *msg, const void *peer)\n+check_reply(const struct eth_dev_mp_req *req, const struct rte_mp_reply *reply)\n+{\n+\tstruct eth_dev_mp_req *resp;\n+\tint i;\n+\n+\tif (reply->nb_received != reply->nb_sent)\n+\t\treturn -EINVAL;\n+\n+\tfor (i = 0; i < reply->nb_received; i++) {\n+\t\tresp = (struct eth_dev_mp_req *)reply->msgs[i].param;\n+\n+\t\tif (resp->t != req->t) {\n+\t\t\tethdev_log(ERR, \"Unexpected response to async request\\n\");\n+\t\t\treturn -EINVAL;\n+\t\t}\n+\n+\t\tif (resp->id != req->id) 
{\n+\t\t\tethdev_log(ERR, \"response to wrong async request\\n\");\n+\t\t\treturn -ENOENT;\n+\t\t}\n+\n+\t\tif (resp->result)\n+\t\t\treturn resp->result;\n+\t}\n+\n+\treturn 0;\n+}\n+\n+static int\n+send_response_to_secondary(const struct eth_dev_mp_req *req, int result)\n+{\n+\tstruct rte_mp_msg resp_msg;\n+\tstruct eth_dev_mp_req *resp =\n+\t\t(struct eth_dev_mp_req *)resp_msg.param;\n+\tint ret = 0;\n+\n+\tmemset(&resp_msg, 0, sizeof(resp_msg));\n+\tresp_msg.len_param = sizeof(*resp);\n+\tstrcpy(resp_msg.name, ETH_DEV_MP_ACTION_RESPONSE);\n+\tmemcpy(resp, req, sizeof(*req));\n+\tresp->result = result;\n+\n+\tret = rte_mp_sendmsg(&resp_msg);\n+\tif (ret)\n+\t\tethdev_log(ERR, \"failed to send response to secondary\\n\");\n+\n+\treturn ret;\n+}\n+\n+static int\n+handle_async_attach_response(const struct rte_mp_msg *request,\n+\t\t\t     const struct rte_mp_reply *reply)\n+{\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)request->param;\n+\tstruct mp_request *entry;\n+\tstruct eth_dev_mp_req tmp_req;\n+\tint ret = 0;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (!entry) {\n+\t\tethdev_log(ERR, \"wrong request ID\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto finish;\n+\t}\n+\n+\tret = check_reply(req, reply);\n+\tif (ret) {\n+\t\ttmp_req = *req;\n+\t\ttmp_req.t = REQ_TYPE_ATTACH_ROLLBACK;\n+\n+\t\tret = send_request_to_secondary_async(&tmp_req);\n+\t\tif (ret) {\n+\t\t\tethdev_log(ERR, \"couldn't send async request\\n\");\n+\t\t\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\t\t\tfree(entry);\n+\t\t}\n+\t} else {\n+\t\tsend_response_to_secondary(req, 0);\n+\t\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\t\tfree(entry);\n+\t}\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\treturn ret;\n+}\n+\n+static int\n+handle_async_detach_response(const struct rte_mp_msg *request,\n+\t\t\tconst struct rte_mp_reply *reply)\n 
{\n-\tRTE_SET_USED(msg);\n-\tRTE_SET_USED(peer);\n-\treturn -ENOTSUP;\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)request->param;\n+\tstruct mp_request *entry;\n+\tint ret = 0;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (!entry) {\n+\t\tethdev_log(ERR, \"wrong request ID\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto finish;\n+\t}\n+\n+\tret = check_reply(req, reply);\n+\tif (ret) {\n+\t\tsend_response_to_secondary(req, ret);\n+\t} else {\n+\t\tdo_eth_dev_detach(req->port_id);\n+\t\tsend_response_to_secondary(req, 0);\n+\t}\n+\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\tfree(entry);\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\treturn ret;\n }\n \n static int\n-handle_primary_response(const struct rte_mp_msg *msg, const void *peer)\n+handle_async_pre_detach_response(const struct rte_mp_msg *request,\n+\t\t\t\tconst struct rte_mp_reply *reply)\n {\n-\tRTE_SET_USED(msg);\n-\tRTE_SET_USED(peer);\n-\treturn -ENOTSUP;\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)request->param;\n+\tstruct eth_dev_mp_req tmp_req;\n+\tstruct mp_request *entry;\n+\tint ret = 0;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (!entry) {\n+\t\tethdev_log(ERR, \"wrong request ID\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto finish;\n+\t}\n+\n+\tret = check_reply(req, reply);\n+\tif (!ret) {\n+\t\ttmp_req = *req;\n+\t\ttmp_req.t = REQ_TYPE_DETACH;\n+\n+\t\tret = send_request_to_secondary_async(&tmp_req);\n+\t\tif (ret) {\n+\t\t\tethdev_log(ERR, \"couldn't send async request\\n\");\n+\t\t\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\t\t\tfree(entry);\n+\t\t}\n+\t} else {\n+\t\tsend_response_to_secondary(req, ret);\n+\t\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\t\tfree(entry);\n+\t}\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\treturn 0;\n+}\n+\n+static 
int\n+handle_async_rollback_response(const struct rte_mp_msg *request,\n+\t\t\t\tconst struct rte_mp_reply *reply __rte_unused)\n+{\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)request->param;\n+\tstruct mp_request *entry;\n+\tint ret = 0;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (!entry) {\n+\t\tethdev_log(ERR, \"wrong request ID\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto finish;\n+\t}\n+\n+\t/* we have nothing to do if rollback still fail, just detach */\n+\tdo_eth_dev_detach(req->port_id);\n+\t/* send response to secondary with the reason of rollback */\n+\tsend_response_to_secondary(req, req->result);\n+\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\tfree(entry);\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\treturn ret;\n+}\n+\n+static int\n+send_request_to_secondary_async(const struct eth_dev_mp_req *req)\n+{\n+\tstruct timespec ts = {.tv_sec = MP_TIMEOUT_S, .tv_nsec = 0};\n+\tstruct rte_mp_msg mp_req;\n+\trte_mp_async_reply_t clb;\n+\tint ret = 0;\n+\n+\tmemset(&mp_req, 0, sizeof(mp_req));\n+\tmemcpy(mp_req.param, req, sizeof(*req));\n+\tmp_req.len_param = sizeof(*req);\n+\tstrcpy(mp_req.name, ETH_DEV_MP_ACTION_REQUEST);\n+\n+\tif (req->t == REQ_TYPE_ATTACH)\n+\t\tclb = handle_async_attach_response;\n+\telse if (req->t == REQ_TYPE_PRE_DETACH)\n+\t\tclb = handle_async_pre_detach_response;\n+\telse if (req->t == REQ_TYPE_DETACH)\n+\t\tclb = handle_async_detach_response;\n+\telse if (req->t == REQ_TYPE_ATTACH_ROLLBACK)\n+\t\tclb = handle_async_rollback_response;\n+\telse\n+\t\treturn -1;\n+\tdo {\n+\t\tret = rte_mp_request_async(&mp_req, &ts, clb);\n+\t} while (ret != 0 && rte_errno == EEXIST);\n+\n+\tif (ret)\n+\t\tethdev_log(ERR, \"couldn't send async request\\n\");\n+\n+\treturn ret;\n+}\n+\n+static void\n+__handle_secondary_request(void *param)\n+{\n+\tstruct rte_mp_msg *msg = param;\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct 
eth_dev_mp_req *)msg->param;\n+\tstruct eth_dev_mp_req tmp_req;\n+\tstruct mp_request *entry;\n+\tuint16_t port_id;\n+\tint ret = 0;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (entry) {\n+\t\tethdev_log(ERR, \"duplicate request id\\n\");\n+\t\tret = -EEXIST;\n+\t\tgoto finish;\n+\t}\n+\n+\tentry = malloc(sizeof(*entry));\n+\tif (entry == NULL) {\n+\t\tethdev_log(ERR, \"not enough memory to allocate request entry\\n\");\n+\t\tret = -ENOMEM;\n+\t\tgoto finish;\n+\t}\n+\n+\tif (req->t == REQ_TYPE_ATTACH) {\n+\t\tret = do_eth_dev_attach(req->devargs, &port_id);\n+\t\tif (!ret) {\n+\t\t\ttmp_req = *req;\n+\t\t\ttmp_req.port_id = port_id;\n+\t\t\tret = send_request_to_secondary_async(&tmp_req);\n+\t\t}\n+\t} else if (req->t == REQ_TYPE_DETACH) {\n+\t\tif (!rte_eth_dev_is_valid_port(req->port_id))\n+\t\t\tret = -EINVAL;\n+\t\tif (!ret)\n+\t\t\tret = process_lock_callbacks(req->port_id);\n+\t\tif (!ret) {\n+\t\t\ttmp_req = *req;\n+\t\t\ttmp_req.t = REQ_TYPE_PRE_DETACH;\n+\t\t\tret = send_request_to_secondary_async(&tmp_req);\n+\t\t}\n+\t} else {\n+\t\tethdev_log(ERR, \"unsupported secondary to primary request\\n\");\n+\t\tret = -ENOTSUP;\n+\t\tgoto finish;\n+\t}\n+\n+\tif (ret) {\n+\t\tret = send_response_to_secondary(req, ret);\n+\t\tif (ret) {\n+\t\t\tethdev_log(ERR, \"failed to send response to secondary\\n\");\n+\t\t\tgoto finish;\n+\t\t}\n+\t} else {\n+\t\tmemcpy(&entry->user_req, req, sizeof(*req));\n+\t\tentry->state = REQ_STATE_ACTIVE;\n+\t\tTAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);\n+\t\tentry = NULL;\n+\t}\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\tif (entry)\n+\t\tfree(entry);\n+\tfree(msg);\n+}\n+\n+static int\n+handle_secondary_request(const struct rte_mp_msg *msg,\n+\t\t\tconst void *peer __rte_unused)\n+{\n+\tstruct rte_mp_msg *msg_cpy;\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)msg->param;\n+\tint ret = 0;\n+\n+\tmsg_cpy = 
malloc(sizeof(*msg_cpy));\n+\tif (msg_cpy == NULL) {\n+\t\tethdev_log(ERR, \"not enough memory\\n\");\n+\t\treturn send_response_to_secondary(req, -ENOMEM);\n+\t}\n+\n+\tmemcpy(msg_cpy, msg, sizeof(*msg_cpy));\n+\n+\t/**\n+\t * We can't handle the secondary request in mp callback because\n+\t * we are running in primary process, we are going to invoke SYNC IPC\n+\t * in rte_malloc.\n+\t */\n+\tret = rte_eal_alarm_set(1, __handle_secondary_request, msg_cpy);\n+\tif (ret) {\n+\t\tethdev_log(ERR, \"failed to set alarm callback\\n\");\n+\t\treturn send_response_to_secondary(req, ret);\n+\t}\n+\treturn 0;\n+}\n+\n+static int\n+handle_primary_response(const struct rte_mp_msg *msg,\n+\t\t\tconst void *peer __rte_unused)\n+{\n+\tconst struct eth_dev_mp_req *req =\n+\t\t(const struct eth_dev_mp_req *)msg->param;\n+\tstruct mp_request *entry;\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tentry = find_request_by_id(req->id);\n+\tif (entry) {\n+\t\tentry->user_req.result = req->result;\n+\t\tentry->user_req.port_id = req->port_id;\n+\t\tentry->state = REQ_STATE_COMPLETE;\n+\n+\t\tpthread_cond_signal(&entry->cond);\n+\t}\n+\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\n+\treturn 0;\n }\n \n static int\n@@ -134,8 +561,74 @@ handle_primary_request(const struct rte_mp_msg *msg, const void *peer)\n \n int eth_dev_request_to_primary(struct eth_dev_mp_req *req)\n {\n-\tRTE_SET_USED(req);\n-\treturn -ENOTSUP;\n+\tstruct rte_mp_msg msg;\n+\tstruct eth_dev_mp_req *msg_req = (struct eth_dev_mp_req *)msg.param;\n+\tstruct mp_request *entry;\n+\tstruct timespec ts;\n+\tstruct timeval now;\n+\tint ret = 0;\n+\n+\tmemset(&msg, 0, sizeof(msg));\n+\tmemset(&ts, 0, sizeof(ts));\n+\n+\tentry = malloc(sizeof(*entry));\n+\tif (entry == NULL) {\n+\t\tethdev_log(ERR, \"not enough memory to allocate request entry\\n\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tpthread_mutex_lock(&mp_request_list.lock);\n+\n+\tret = gettimeofday(&now, NULL);\n+\tif (ret) {\n+\t\tethdev_log(ERR, \"cannot 
get current time\\n\");\n+\t\tret = -EINVAL;\n+\t\tgoto finish;\n+\t}\n+\n+\tts.tv_nsec = (now.tv_usec * 1000) % 1000000000;\n+\tts.tv_sec = now.tv_sec + MP_TIMEOUT_S +\n+\t\t\t(now.tv_usec * 1000) / 1000000000;\n+\n+\tpthread_cond_init(&entry->cond, NULL);\n+\n+\tmsg.len_param = sizeof(*req);\n+\tstrcpy(msg.name, ETH_DEV_MP_ACTION_REQUEST);\n+\n+\treq->id = get_unique_id();\n+\n+\tmemcpy(msg_req, req, sizeof(*req));\n+\n+\tret = rte_mp_sendmsg(&msg);\n+\tif (ret) {\n+\t\tethdev_log(ERR, \"cannot send message to primary\");\n+\t\tgoto finish;\n+\t}\n+\n+\tmemcpy(&entry->user_req, req, sizeof(*req));\n+\n+\tentry->state = REQ_STATE_ACTIVE;\n+\n+\tTAILQ_INSERT_TAIL(&mp_request_list.list, entry, next);\n+\n+\tdo {\n+\t\tret = pthread_cond_timedwait(&entry->cond,\n+\t\t\t\t&mp_request_list.lock, &ts);\n+\t} while (ret != 0 && ret != ETIMEDOUT);\n+\n+\tif (entry->state != REQ_STATE_COMPLETE) {\n+\t\tRTE_LOG(ERR, EAL, \"request time out\\n\");\n+\t\tret = -ETIMEDOUT;\n+\t} else {\n+\t\treq->port_id = entry->user_req.port_id;\n+\t\treq->result = entry->user_req.result;\n+\t}\n+\tTAILQ_REMOVE(&mp_request_list.list, entry, next);\n+\n+finish:\n+\tpthread_mutex_unlock(&mp_request_list.lock);\n+\tfree(entry);\n+\treturn ret;\n }\n \n /**\ndiff --git a/lib/librte_ethdev/ethdev_mp.h b/lib/librte_ethdev/ethdev_mp.h\nindex 40be46c89..94ff21cdd 100644\n--- a/lib/librte_ethdev/ethdev_mp.h\n+++ b/lib/librte_ethdev/ethdev_mp.h\n@@ -18,6 +18,7 @@ enum eth_dev_req_type {\n };\n \n struct eth_dev_mp_req {\n+\tuint64_t id;\n \tenum eth_dev_req_type t;\n \tchar devargs[MAX_DEV_ARGS_LEN];\n \tuint16_t port_id;\n",
    "prefixes": [
        "v3",
        "07/23"
    ]
}