get:
Show a patch.

patch:
Partially update a patch (only the fields supplied are changed).

put:
Fully update a patch (all writable fields are replaced).

GET /api/patches/53653/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 53653,
    "url": "http://patches.dpdk.org/api/patches/53653/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20190523081339.56348-12-jerinj@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190523081339.56348-12-jerinj@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190523081339.56348-12-jerinj@marvell.com",
    "date": "2019-05-23T08:13:23",
    "name": "[v1,11/27] common/octeontx2: add PF to VF mailbox IRQ and msg handlers",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "64486c7ca46112b0d6df50fc736949af0b3be7ab",
    "submitter": {
        "id": 1188,
        "url": "http://patches.dpdk.org/api/people/1188/?format=api",
        "name": "Jerin Jacob Kollanukkaran",
        "email": "jerinj@marvell.com"
    },
    "delegate": null,
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20190523081339.56348-12-jerinj@marvell.com/mbox/",
    "series": [
        {
            "id": 4754,
            "url": "http://patches.dpdk.org/api/series/4754/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=4754",
            "date": "2019-05-23T08:13:12",
            "name": "OCTEON TX2 common and mempool driver",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/4754/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/53653/comments/",
    "check": "fail",
    "checks": "http://patches.dpdk.org/api/patches/53653/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 5EE781B9CD;\n\tThu, 23 May 2019 10:17:26 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n\t[67.231.156.173]) by dpdk.org (Postfix) with ESMTP id EB6A91B94F\n\tfor <dev@dpdk.org>; Thu, 23 May 2019 10:17:03 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n\tby mx0b-0016f401.pphosted.com (8.16.0.27/8.16.0.27) with SMTP id\n\tx4N89eux019037; Thu, 23 May 2019 01:17:03 -0700",
            "from sc-exch02.marvell.com ([199.233.58.182])\n\tby mx0b-0016f401.pphosted.com with ESMTP id 2smnwk0sfp-1\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tThu, 23 May 2019 01:17:03 -0700",
            "from SC-EXCH01.marvell.com (10.93.176.81) by SC-EXCH02.marvell.com\n\t(10.93.176.82) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tThu, 23 May 2019 01:15:34 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Thu, 23 May 2019 01:15:34 -0700",
            "from jerin-lab.marvell.com (unknown [10.28.34.14])\n\tby maili.marvell.com (Postfix) with ESMTP id 8C7C63F703F;\n\tThu, 23 May 2019 01:15:33 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-transfer-encoding : content-type; s=pfpt0818;\n\tbh=0+KQk0Qhi92rzP4UR2jlHr9xs+LM56gGLePeg9d1M54=;\n\tb=B3+8SIVs5F6h2G9yJba82jv18u1YKy0C2IOvbxuxABs14R+DW/XCUMgWqhpKX0N5TXKE\n\tPhhxMF720FfrwY5Huxzp/FhL+SnxcItD6jrOyT14zoLX55eZdphRbMme4FDU3gfKu8Kc\n\tU+XTKDFwBHPkuwQDQwEXg+5TXrWtYtzXk2/LjHhCMStDX42j95zb+MomeF/emjQbuyEu\n\tU15ERgkhg5PMRJQbCYpdXlnyzJ/fORE9WCj0nCmHlbFFbC08CV+pPdHylNCRqNzsmCcq\n\tndTQDGHan2Ggrw6utsL2fLIcb4Jk2d1s0Y788xoGmMteXvfo22Cq5Tk32jSO/zHh8XVG\n\tAQ== ",
        "From": "<jerinj@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<thomas@monjalon.net>, Nithin Dabilpuram <ndabilpuram@marvell.com>,\n\tKrzysztof Kanas <kkanas@marvell.com>",
        "Date": "Thu, 23 May 2019 13:43:23 +0530",
        "Message-ID": "<20190523081339.56348-12-jerinj@marvell.com>",
        "X-Mailer": "git-send-email 2.21.0",
        "In-Reply-To": "<20190523081339.56348-1-jerinj@marvell.com>",
        "References": "<20190523081339.56348-1-jerinj@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:, ,\n\tdefinitions=2019-05-23_08:, , signatures=0",
        "Subject": "[dpdk-dev] [PATCH v1 11/27] common/octeontx2: add PF to VF mailbox\n\tIRQ and msg handlers",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "From: Nithin Dabilpuram <ndabilpuram@marvell.com>\n\nPF has additional responsibility being server for VF messages\nand forward to AF and once AF process it then forward\nthe response back to VF.\notx2_vf_pf_mbox_irq() will process the VF mailbox request and\naf_pf_wait_msg() will until getting a response back from AF.\n\nSigned-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>\nSigned-off-by: Krzysztof Kanas <kkanas@marvell.com>\n---\n drivers/common/octeontx2/otx2_dev.c | 240 +++++++++++++++++++++++++++-\n 1 file changed, 239 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/common/octeontx2/otx2_dev.c b/drivers/common/octeontx2/otx2_dev.c\nindex ba4fd9547..09b551819 100644\n--- a/drivers/common/octeontx2/otx2_dev.c\n+++ b/drivers/common/octeontx2/otx2_dev.c\n@@ -7,6 +7,7 @@\n #include <sys/mman.h>\n #include <unistd.h>\n \n+#include <rte_alarm.h>\n #include <rte_common.h>\n #include <rte_eal.h>\n #include <rte_memcpy.h>\n@@ -50,6 +51,200 @@ mbox_mem_unmap(void *va, size_t size)\n \t\tmunmap(va, size);\n }\n \n+static int\n+af_pf_wait_msg(struct otx2_dev *dev, uint16_t vf, int num_msg)\n+{\n+\tuint32_t timeout = 0, sleep = 1; struct otx2_mbox *mbox = dev->mbox;\n+\tstruct otx2_mbox_dev *mdev = &mbox->dev[0];\n+\tvolatile uint64_t int_status;\n+\tstruct mbox_hdr *req_hdr;\n+\tstruct mbox_msghdr *msg;\n+\tstruct mbox_msghdr *rsp;\n+\tuint64_t offset;\n+\tsize_t size;\n+\tint i;\n+\n+\t/* We need to disable PF interrupts. We are in timer interrupt */\n+\totx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);\n+\n+\t/* Send message */\n+\totx2_mbox_msg_send(mbox, 0);\n+\n+\tdo {\n+\t\trte_delay_ms(sleep);\n+\t\ttimeout++;\n+\t\tif (timeout >= MBOX_RSP_TIMEOUT) {\n+\t\t\totx2_err(\"Routed messages %d timeout: %dms\",\n+\t\t\t\t num_msg, MBOX_RSP_TIMEOUT);\n+\t\t\tbreak;\n+\t\t}\n+\t\tint_status = otx2_read64(dev->bar2 + RVU_PF_INT);\n+\t} while ((int_status & 0x1) != 0x1);\n+\n+\t/* Clear */\n+\totx2_write64(~0ull, dev->bar2 + RVU_PF_INT);\n+\n+\t/* Enable interrupts */\n+\totx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);\n+\n+\trte_spinlock_lock(&mdev->mbox_lock);\n+\n+\treq_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);\n+\tif (req_hdr->num_msgs != num_msg)\n+\t\totx2_err(\"Routed messages: %d received: %d\", num_msg,\n+\t\t\t req_hdr->num_msgs);\n+\n+\t/* Get messages from mbox */\n+\toffset = mbox->rx_start +\n+\t\t\tRTE_ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);\n+\tfor (i = 0; i < req_hdr->num_msgs; i++) {\n+\t\tmsg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + 
offset);\n+\t\tsize = mbox->rx_start + msg->next_msgoff - offset;\n+\n+\t\t/* Reserve PF/VF mbox message */\n+\t\tsize = RTE_ALIGN(size, MBOX_MSG_ALIGN);\n+\t\trsp = otx2_mbox_alloc_msg(&dev->mbox_vfpf, vf, size);\n+\t\totx2_mbox_rsp_init(msg->id, rsp);\n+\n+\t\t/* Copy message from AF<->PF mbox to PF<->VF mbox */\n+\t\totx2_mbox_memcpy((uint8_t *)rsp + sizeof(struct mbox_msghdr),\n+\t\t\t\t (uint8_t *)msg + sizeof(struct mbox_msghdr),\n+\t\t\t\t size - sizeof(struct mbox_msghdr));\n+\n+\t\t/* Set status and sender pf_func data */\n+\t\trsp->rc = msg->rc;\n+\t\trsp->pcifunc = msg->pcifunc;\n+\n+\t\toffset = mbox->rx_start + msg->next_msgoff;\n+\t}\n+\trte_spinlock_unlock(&mdev->mbox_lock);\n+\n+\treturn req_hdr->num_msgs;\n+}\n+\n+static int\n+vf_pf_process_msgs(struct otx2_dev *dev, uint16_t vf)\n+{\n+\tint offset, routed = 0; struct otx2_mbox *mbox = &dev->mbox_vfpf;\n+\tstruct otx2_mbox_dev *mdev = &mbox->dev[vf];\n+\tstruct mbox_hdr *req_hdr;\n+\tstruct mbox_msghdr *msg;\n+\tsize_t size;\n+\tuint16_t i;\n+\n+\treq_hdr = (struct mbox_hdr *)((uintptr_t)mdev->mbase + mbox->rx_start);\n+\tif (!req_hdr->num_msgs)\n+\t\treturn 0;\n+\n+\toffset = mbox->rx_start + RTE_ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);\n+\n+\tfor (i = 0; i < req_hdr->num_msgs; i++) {\n+\n+\t\tmsg = (struct mbox_msghdr *)((uintptr_t)mdev->mbase + offset);\n+\t\tsize = mbox->rx_start + msg->next_msgoff - offset;\n+\n+\t\t/* RVU_PF_FUNC_S */\n+\t\tmsg->pcifunc = otx2_pfvf_func(dev->pf, vf);\n+\n+\t\tif (msg->id == MBOX_MSG_READY) {\n+\t\t\tstruct ready_msg_rsp *rsp;\n+\t\t\tuint16_t max_bits = sizeof(dev->active_vfs[0]) * 8;\n+\n+\t\t\t/* Handle READY message in PF */\n+\t\t\tdev->active_vfs[vf / max_bits] |=\n+\t\t\t\t\t\tBIT_ULL(vf % max_bits);\n+\t\t\trsp = (struct ready_msg_rsp *)\n+\t\t\t       otx2_mbox_alloc_msg(mbox, vf, sizeof(*rsp));\n+\t\t\totx2_mbox_rsp_init(msg->id, rsp);\n+\n+\t\t\t/* PF/VF function ID */\n+\t\t\trsp->hdr.pcifunc = msg->pcifunc;\n+\t\t\trsp->hdr.rc = 0;\n+\t\t} else 
{\n+\t\t\tstruct mbox_msghdr *af_req;\n+\t\t\t/* Reserve AF/PF mbox message */\n+\t\t\tsize = RTE_ALIGN(size, MBOX_MSG_ALIGN);\n+\t\t\taf_req = otx2_mbox_alloc_msg(dev->mbox, 0, size);\n+\t\t\totx2_mbox_req_init(msg->id, af_req);\n+\n+\t\t\t/* Copy message from VF<->PF mbox to PF<->AF mbox */\n+\t\t\totx2_mbox_memcpy((uint8_t *)af_req +\n+\t\t\t\t   sizeof(struct mbox_msghdr),\n+\t\t\t\t   (uint8_t *)msg + sizeof(struct mbox_msghdr),\n+\t\t\t\t   size - sizeof(struct mbox_msghdr));\n+\t\t\taf_req->pcifunc = msg->pcifunc;\n+\t\t\trouted++;\n+\t\t}\n+\t\toffset = mbox->rx_start + msg->next_msgoff;\n+\t}\n+\n+\tif (routed > 0) {\n+\t\totx2_base_dbg(\"pf:%d routed %d messages from vf:%d to AF\",\n+\t\t\t      dev->pf, routed, vf);\n+\t\taf_pf_wait_msg(dev, vf, routed);\n+\t\totx2_mbox_reset(dev->mbox, 0);\n+\t}\n+\n+\t/* Send mbox responses to VF */\n+\tif (mdev->num_msgs) {\n+\t\totx2_base_dbg(\"pf:%d reply %d messages to vf:%d\",\n+\t\t\t      dev->pf, mdev->num_msgs, vf);\n+\t\totx2_mbox_msg_send(mbox, vf);\n+\t}\n+\n+\treturn i;\n+}\n+\n+static void\n+otx2_vf_pf_mbox_handle_msg(void *param)\n+{\n+\tuint16_t vf, max_vf, max_bits;\n+\tstruct otx2_dev *dev = param;\n+\n+\tmax_bits = sizeof(dev->intr.bits[0]) * sizeof(uint64_t);\n+\tmax_vf = max_bits * MAX_VFPF_DWORD_BITS;\n+\n+\tfor (vf = 0; vf < max_vf; vf++) {\n+\t\tif (dev->intr.bits[vf/max_bits] & BIT_ULL(vf%max_bits)) {\n+\t\t\totx2_base_dbg(\"Process vf:%d request (pf:%d, vf:%d)\",\n+\t\t\t\t       vf, dev->pf, dev->vf);\n+\t\t\tvf_pf_process_msgs(dev, vf);\n+\t\t\tdev->intr.bits[vf/max_bits] &= ~(BIT_ULL(vf%max_bits));\n+\t\t}\n+\t}\n+\tdev->timer_set = 0;\n+}\n+\n+static void\n+otx2_vf_pf_mbox_irq(void *param)\n+{\n+\tstruct otx2_dev *dev = param;\n+\tbool alarm_set = false;\n+\tuint64_t intr;\n+\tint vfpf;\n+\n+\tfor (vfpf = 0; vfpf < MAX_VFPF_DWORD_BITS; ++vfpf) {\n+\t\tintr = otx2_read64(dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));\n+\t\tif (!intr)\n+\t\t\tcontinue;\n+\n+\t\totx2_base_dbg(\"vfpf: %d intr: 
0x%\" PRIx64 \" (pf:%d, vf:%d)\",\n+\t\t\t      vfpf, intr, dev->pf, dev->vf);\n+\n+\t\t/* Save and clear intr bits */\n+\t\tdev->intr.bits[vfpf] |= intr;\n+\t\totx2_write64(intr, dev->bar2 + RVU_PF_VFPF_MBOX_INTX(vfpf));\n+\t\talarm_set = true;\n+\t}\n+\n+\tif (!dev->timer_set && alarm_set) {\n+\t\tdev->timer_set = 1;\n+\t\t/* Start timer to handle messages */\n+\t\trte_eal_alarm_set(VF_PF_MBOX_TIMER_MS,\n+\t\t\t\t  otx2_vf_pf_mbox_handle_msg, dev);\n+\t}\n+}\n+\n static void\n otx2_process_msgs(struct otx2_dev *dev, struct otx2_mbox *mbox)\n {\n@@ -118,12 +313,33 @@ static int\n mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)\n {\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n-\tint rc;\n+\tint i, rc;\n+\n+\t/* HW clear irq */\n+\tfor (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)\n+\t\totx2_write64(~0ull, dev->bar2 +\n+\t\t\t     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));\n \n \totx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);\n \n \tdev->timer_set = 0;\n \n+\t/* MBOX interrupt for VF(0...63) <-> PF */\n+\trc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,\n+\t\t\t       RVU_PF_INT_VEC_VFPF_MBOX0);\n+\n+\tif (rc) {\n+\t\totx2_err(\"Fail to register PF(VF0-63) mbox irq\");\n+\t\treturn rc;\n+\t}\n+\t/* MBOX interrupt for VF(64...128) <-> PF */\n+\trc = otx2_register_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,\n+\t\t\t       RVU_PF_INT_VEC_VFPF_MBOX1);\n+\n+\tif (rc) {\n+\t\totx2_err(\"Fail to register PF(VF64-128) mbox irq\");\n+\t\treturn rc;\n+\t}\n \t/* MBOX interrupt AF <-> PF */\n \trc = otx2_register_irq(intr_handle, otx2_af_pf_mbox_irq,\n \t\t\t       dev, RVU_PF_INT_VEC_AFPF_MBOX);\n@@ -132,6 +348,11 @@ mbox_register_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)\n \t\treturn rc;\n \t}\n \n+\t/* HW enable intr */\n+\tfor (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)\n+\t\totx2_write64(~0ull, dev->bar2 +\n+\t\t\tRVU_PF_VFPF_MBOX_INT_ENA_W1SX(i));\n+\n \totx2_write64(~0ull, dev->bar2 + RVU_PF_INT);\n 
\totx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1S);\n \n@@ -142,11 +363,28 @@ static void\n mbox_unregister_irq(struct rte_pci_device *pci_dev, struct otx2_dev *dev)\n {\n \tstruct rte_intr_handle *intr_handle = &pci_dev->intr_handle;\n+\tint i;\n+\n+\t/* HW clear irq */\n+\tfor (i = 0; i < MAX_VFPF_DWORD_BITS; ++i)\n+\t\totx2_write64(~0ull, dev->bar2 +\n+\t\t\t     RVU_PF_VFPF_MBOX_INT_ENA_W1CX(i));\n \n \totx2_write64(~0ull, dev->bar2 + RVU_PF_INT_ENA_W1C);\n \n \tdev->timer_set = 0;\n \n+\trte_eal_alarm_cancel(otx2_vf_pf_mbox_handle_msg, dev);\n+\n+\t/* Unregister the interrupt handler for each vectors */\n+\t/* MBOX interrupt for VF(0...63) <-> PF */\n+\totx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,\n+\t\t\t    RVU_PF_INT_VEC_VFPF_MBOX0);\n+\n+\t/* MBOX interrupt for VF(64...128) <-> PF */\n+\totx2_unregister_irq(intr_handle, otx2_vf_pf_mbox_irq, dev,\n+\t\t\t    RVU_PF_INT_VEC_VFPF_MBOX1);\n+\n \t/* MBOX interrupt AF <-> PF */\n \totx2_unregister_irq(intr_handle, otx2_af_pf_mbox_irq, dev,\n \t\t\t    RVU_PF_INT_VEC_AFPF_MBOX);\n",
    "prefixes": [
        "v1",
        "11/27"
    ]
}