get:
Show a patch.

patch:
Update a patch.

put:
Update a patch.

GET /api/patches/115903/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 115903,
    "url": "https://patches.dpdk.org/api/patches/115903/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20220905105828.3190335-13-junfeng.guo@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20220905105828.3190335-13-junfeng.guo@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20220905105828.3190335-13-junfeng.guo@intel.com",
    "date": "2022-09-05T10:58:26",
    "name": "[v2,12/14] net/idpf: add support for write back based on ITR expire",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "042fcc2416e683784761b842c5498858b1092837",
    "submitter": {
        "id": 1785,
        "url": "https://patches.dpdk.org/api/people/1785/?format=api",
        "name": "Junfeng Guo",
        "email": "junfeng.guo@intel.com"
    },
    "delegate": {
        "id": 3961,
        "url": "https://patches.dpdk.org/api/users/3961/?format=api",
        "username": "arybchenko",
        "first_name": "Andrew",
        "last_name": "Rybchenko",
        "email": "andrew.rybchenko@oktetlabs.ru"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20220905105828.3190335-13-junfeng.guo@intel.com/mbox/",
    "series": [
        {
            "id": 24538,
            "url": "https://patches.dpdk.org/api/series/24538/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=24538",
            "date": "2022-09-05T10:58:14",
            "name": "add support for idpf PMD in DPDK",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/24538/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/115903/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/115903/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 508DEA054F;\n\tMon,  5 Sep 2022 12:59:59 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 3FFE642B88;\n\tMon,  5 Sep 2022 12:59:04 +0200 (CEST)",
            "from mga03.intel.com (mga03.intel.com [134.134.136.65])\n by mails.dpdk.org (Postfix) with ESMTP id 79CDC42B98\n for <dev@dpdk.org>; Mon,  5 Sep 2022 12:59:02 +0200 (CEST)",
            "from fmsmga004.fm.intel.com ([10.253.24.48])\n by orsmga103.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 05 Sep 2022 03:59:01 -0700",
            "from dpdk-jf-ntb-v2.sh.intel.com ([10.67.118.246])\n by fmsmga004.fm.intel.com with ESMTP; 05 Sep 2022 03:59:00 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1662375542; x=1693911542;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=164M8h1yBnwAgW8dO99cfy2J/FQ1wMgfzAW9YpCgjEs=;\n b=DUa4NZBgGQk62zn69MRA0awqZZc8VUseGM5K1DCXEFV/m7grTJR0zkbn\n su4rhCaLemn/QdTHaF2lZDhUGfmlYnps9LsrNTXf0f0w5YWk3dtNjYubj\n 092PjBIFAZMW+7H58Dsyr/BVjZ2HD0d/mJyqSm60uDn0veCJqtkNA9jEO\n yYkfY0UCQIjNwYT4mNPrNWj0aPlhwnLY7rSqIp+DQmrX6jrBFXtI4EAWN\n inbJ23kTZe0NhfsteehOwIr4zw3CgQIfJrAlpoMKIeDIpa8QCUPrtvMRl\n Z1puiWJtACHtKhQT97z3YpmRz3jLHTYP8y2zbaxYUpOefyZ67xudYryJc g==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10460\"; a=\"297676352\"",
            "E=Sophos;i=\"5.93,291,1654585200\"; d=\"scan'208\";a=\"297676352\"",
            "E=Sophos;i=\"5.93,291,1654585200\"; d=\"scan'208\";a=\"682023948\""
        ],
        "X-ExtLoop1": "1",
        "From": "Junfeng Guo <junfeng.guo@intel.com>",
        "To": "qi.z.zhang@intel.com,\n\tjingjing.wu@intel.com,\n\tbeilei.xing@intel.com",
        "Cc": "dev@dpdk.org,\n\txiao.w.wang@intel.com,\n\tjunfeng.guo@intel.com",
        "Subject": "[PATCH v2 12/14] net/idpf: add support for write back based on ITR\n expire",
        "Date": "Mon,  5 Sep 2022 18:58:26 +0800",
        "Message-Id": "<20220905105828.3190335-13-junfeng.guo@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20220905105828.3190335-1-junfeng.guo@intel.com>",
        "References": "<20220803113104.1184059-1-junfeng.guo@intel.com>\n <20220905105828.3190335-1-junfeng.guo@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Force write-backs by setting WB_ON_ITR bit in DYN_CTL register,\nso that the packets can be received once at a time.\n\nSigned-off-by: Beilei Xing <beilei.xing@intel.com>\nSigned-off-by: Junfeng Guo <junfeng.guo@intel.com>\n---\n drivers/net/idpf/idpf_ethdev.c | 116 +++++++++++++++++++++++++++++++++\n drivers/net/idpf/idpf_ethdev.h |   3 +\n drivers/net/idpf/idpf_vchnl.c  | 108 ++++++++++++++++++++++++++++++\n 3 files changed, 227 insertions(+)",
    "diff": "diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c\nindex 9fee7a783f..b3ca4e3326 100644\n--- a/drivers/net/idpf/idpf_ethdev.c\n+++ b/drivers/net/idpf/idpf_ethdev.c\n@@ -520,6 +520,87 @@ idpf_dev_configure(struct rte_eth_dev *dev)\n \treturn ret;\n }\n \n+static int\n+idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)\n+{\n+\tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_queue_vector *qv_map;\n+\tstruct iecm_hw *hw = &adapter->hw;\n+\tuint32_t dynctl_reg_start;\n+\tuint32_t itrn_reg_start;\n+\tuint32_t dynctl_val, itrn_val;\n+\tuint16_t i;\n+\n+\tqv_map = rte_zmalloc(\"qv_map\",\n+\t\t\tdev->data->nb_rx_queues *\n+\t\t\tsizeof(struct virtchnl2_queue_vector), 0);\n+\tif (!qv_map) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate %d queue-vector map\",\n+\t\t\t    dev->data->nb_rx_queues);\n+\t\tgoto qv_map_alloc_err;\n+\t}\n+\n+\t/* Rx interrupt disabled, Map interrupt only for writeback */\n+\n+\t/* The capability flags adapter->caps->other_caps here should be\n+\t * compared with bit VIRTCHNL2_CAP_WB_ON_ITR. The if condition should\n+\t * be updated when the FW can return correct flag bits.\n+\t */\n+\tif (adapter->caps->other_caps) {\n+\t\tdynctl_reg_start = vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;\n+\t\titrn_reg_start = vport->recv_vectors->vchunks.vchunks->itrn_reg_start;\n+\t\tdynctl_val = IECM_READ_REG(hw, dynctl_reg_start);\n+\t\tPMD_DRV_LOG(DEBUG, \"Value of dynctl_reg_start is 0x%x\", dynctl_val);\n+\t\titrn_val = IECM_READ_REG(hw, itrn_reg_start);\n+\t\tPMD_DRV_LOG(DEBUG, \"Value of itrn_reg_start is 0x%x\", itrn_val);\n+\t\t/* Force write-backs by setting WB_ON_ITR bit in DYN_CTL\n+\t\t * register. WB_ON_ITR and INTENA are mutually exclusive\n+\t\t * bits. Setting WB_ON_ITR bits means TX and RX Descs\n+\t\t * are writen back based on ITR expiration irrespective\n+\t\t * of INTENA setting.\n+\t\t */\n+\t\t/* TBD: need to tune INTERVAL value for better performance. */\n+\t\tif (itrn_val)\n+\t\t\tIECM_WRITE_REG(hw,\n+\t\t\t\t       dynctl_reg_start,\n+\t\t\t\t       VIRTCHNL2_ITR_IDX_0  <<\n+\t\t\t\t       PF_GLINT_DYN_CTL_ITR_INDX_S |\n+\t\t\t\t       PF_GLINT_DYN_CTL_WB_ON_ITR_M |\n+\t\t\t\t       itrn_val <<\n+\t\t\t\t       PF_GLINT_DYN_CTL_INTERVAL_S);\n+\t\telse\n+\t\t\tIECM_WRITE_REG(hw,\n+\t\t\t\t       dynctl_reg_start,\n+\t\t\t\t       VIRTCHNL2_ITR_IDX_0  <<\n+\t\t\t\t       PF_GLINT_DYN_CTL_ITR_INDX_S |\n+\t\t\t\t       PF_GLINT_DYN_CTL_WB_ON_ITR_M |\n+\t\t\t\t       IDPF_DFLT_INTERVAL <<\n+\t\t\t\t       PF_GLINT_DYN_CTL_INTERVAL_S);\n+\t}\n+\tfor (i = 0; i < dev->data->nb_rx_queues; i++) {\n+\t\t/* map all queues to the same vector */\n+\t\tqv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;\n+\t\tqv_map[i].vector_id =\n+\t\t\tvport->recv_vectors->vchunks.vchunks->start_vector_id;\n+\t}\n+\tvport->qv_map = qv_map;\n+\n+\tif (idpf_config_irq_map_unmap(vport, true)) {\n+\t\tPMD_DRV_LOG(ERR, \"config interrupt mapping failed\");\n+\t\tgoto config_irq_map_err;\n+\t}\n+\n+\treturn 0;\n+\n+config_irq_map_err:\n+\trte_free(vport->qv_map);\n+\tvport->qv_map = NULL;\n+\n+qv_map_alloc_err:\n+\treturn -1;\n+}\n+\n static int\n idpf_start_queues(struct rte_eth_dev *dev)\n {\n@@ -557,6 +638,10 @@ static int\n idpf_dev_start(struct rte_eth_dev *dev)\n {\n \tstruct idpf_vport *vport = dev->data->dev_private;\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tuint16_t num_allocated_vectors =\n+\t\tadapter->caps->num_allocated_vectors;\n+\tuint16_t req_vecs_num;\n \n \tPMD_INIT_FUNC_TRACE();\n \n@@ -569,6 +654,23 @@ idpf_dev_start(struct rte_eth_dev *dev)\n \n \tvport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;\n \n+\treq_vecs_num = IDPF_DFLT_Q_VEC_NUM;\n+\tif (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {\n+\t\tPMD_DRV_LOG(ERR, \"The accumulated request vectors' number should be less than %d\",\n+\t\t\t    num_allocated_vectors);\n+\t\tgoto err_mtu;\n+\t}\n+\tif (idpf_alloc_vectors(vport, req_vecs_num)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to allocate interrupt vectors\");\n+\t\tgoto err_mtu;\n+\t}\n+\tadapter->used_vecs_num += req_vecs_num;\n+\n+\tif (idpf_config_rx_queues_irqs(dev)) {\n+\t\tPMD_DRV_LOG(ERR, \"Failed to configure irqs\");\n+\t\tgoto err_mtu;\n+\t}\n+\n \tif (idpf_start_queues(dev)) {\n \t\tPMD_DRV_LOG(ERR, \"Failed to start queues\");\n \t\tgoto err_mtu;\n@@ -609,6 +711,12 @@ idpf_dev_stop(struct rte_eth_dev *dev)\n \n \tidpf_stop_queues(dev);\n \n+\tif (idpf_config_irq_map_unmap(vport, false))\n+\t\tPMD_DRV_LOG(ERR, \"config interrupt unmapping failed\");\n+\n+\tif (idpf_dealloc_vectors(vport))\n+\t\tPMD_DRV_LOG(ERR, \"deallocate interrupt vectors failed\");\n+\n \tvport->stopped = 1;\n \tdev->data->dev_started = 0;\n \n@@ -633,6 +741,12 @@ idpf_dev_close(struct rte_eth_dev *dev)\n \trte_free(vport->rss_key);\n \tvport->rss_key = NULL;\n \n+\trte_free(vport->recv_vectors);\n+\tvport->recv_vectors = NULL;\n+\n+\trte_free(vport->qv_map);\n+\tvport->qv_map = NULL;\n+\n \tadapter->cur_vports &= ~BIT(vport->devarg_id);\n \n \trte_free(vport);\n@@ -982,6 +1096,8 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct idpf_adapter *adapter)\n \tadapter->cur_vport_nb = 0;\n \tadapter->next_vport_idx = 0;\n \n+\tadapter->used_vecs_num = 0;\n+\n \treturn ret;\n \n err_vports:\ndiff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h\nindex 2657b75c95..f96867f3d5 100644\n--- a/drivers/net/idpf/idpf_ethdev.h\n+++ b/drivers/net/idpf/idpf_ethdev.h\n@@ -252,6 +252,9 @@ int idpf_ena_dis_queues(struct idpf_vport *vport, bool enable);\n int idpf_ena_dis_vport(struct idpf_vport *vport, bool enable);\n int idpf_query_stats(struct idpf_vport *vport,\n \t\t     struct virtchnl2_vport_stats **pstats);\n+int idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map);\n+int idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors);\n+int idpf_dealloc_vectors(struct idpf_vport *vport);\n int idpf_query_ptype_info(struct idpf_adapter *adapter);\n int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,\n \t\t      uint16_t buf_len, uint8_t *buf);\ndiff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c\nindex 49572dc83c..97fcfb574b 100644\n--- a/drivers/net/idpf/idpf_vchnl.c\n+++ b/drivers/net/idpf/idpf_vchnl.c\n@@ -1133,6 +1133,114 @@ idpf_config_txq(struct idpf_vport *vport, uint16_t txq_id)\n \n \treturn err;\n }\n+\n+int\n+idpf_config_irq_map_unmap(struct idpf_vport *vport, bool map)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_queue_vector_maps *map_info;\n+\tstruct virtchnl2_queue_vector *vecmap;\n+\tuint16_t nb_rxq = vport->dev_data->nb_rx_queues;\n+\tstruct idpf_cmd_info args;\n+\tint len, i, err = 0;\n+\n+\tlen = sizeof(struct virtchnl2_queue_vector_maps) +\n+\t\t(nb_rxq - 1) * sizeof(struct virtchnl2_queue_vector);\n+\n+\tmap_info = rte_zmalloc(\"map_info\", len, 0);\n+\tif (!map_info)\n+\t\treturn -ENOMEM;\n+\n+\tmap_info->vport_id = vport->vport_id;\n+\tmap_info->num_qv_maps = nb_rxq;\n+\tfor (i = 0; i < nb_rxq; i++) {\n+\t\tvecmap = &map_info->qv_maps[i];\n+\t\tvecmap->queue_id = vport->qv_map[i].queue_id;\n+\t\tvecmap->vector_id = vport->qv_map[i].vector_id;\n+\t\tvecmap->itr_idx = VIRTCHNL2_ITR_IDX_0;\n+\t\tvecmap->queue_type = VIRTCHNL2_QUEUE_TYPE_RX;\n+\t}\n+\n+\targs.ops = map ? VIRTCHNL2_OP_MAP_QUEUE_VECTOR :\n+\t\tVIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;\n+\targs.in_args = (u8 *)map_info;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command of VIRTCHNL2_OP_%s_QUEUE_VECTOR\",\n+\t\t\t    map ? \"MAP\" : \"UNMAP\");\n+\n+\trte_free(map_info);\n+\treturn err;\n+}\n+\n+int\n+idpf_alloc_vectors(struct idpf_vport *vport, uint16_t num_vectors)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_alloc_vectors *alloc_vec;\n+\tstruct idpf_cmd_info args;\n+\tint err, len;\n+\n+\tlen = sizeof(struct virtchnl2_alloc_vectors) +\n+\t\t(num_vectors - 1) * sizeof(struct virtchnl2_vector_chunk);\n+\talloc_vec = rte_zmalloc(\"alloc_vec\", len, 0);\n+\tif (!alloc_vec)\n+\t\treturn -ENOMEM;\n+\n+\talloc_vec->num_vectors = num_vectors;\n+\n+\targs.ops = VIRTCHNL2_OP_ALLOC_VECTORS;\n+\targs.in_args = (u8 *)alloc_vec;\n+\targs.in_args_size = sizeof(struct virtchnl2_alloc_vectors);\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command VIRTCHNL2_OP_ALLOC_VECTORS\");\n+\n+\tif (!vport->recv_vectors) {\n+\t\tvport->recv_vectors = rte_zmalloc(\"recv_vectors\", len, 0);\n+\t\tif (!vport->recv_vectors) {\n+\t\t\trte_free(alloc_vec);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t}\n+\n+\trte_memcpy(vport->recv_vectors, args.out_buffer, len);\n+\trte_free(alloc_vec);\n+\treturn err;\n+}\n+\n+int\n+idpf_dealloc_vectors(struct idpf_vport *vport)\n+{\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct virtchnl2_alloc_vectors *alloc_vec;\n+\tstruct virtchnl2_vector_chunks *vcs;\n+\tstruct idpf_cmd_info args;\n+\tint err, len;\n+\n+\talloc_vec = vport->recv_vectors;\n+\tvcs = &alloc_vec->vchunks;\n+\n+\tlen = sizeof(struct virtchnl2_vector_chunks) +\n+\t\t(vcs->num_vchunks - 1) * sizeof(struct virtchnl2_vector_chunk);\n+\n+\targs.ops = VIRTCHNL2_OP_DEALLOC_VECTORS;\n+\targs.in_args = (u8 *)vcs;\n+\targs.in_args_size = len;\n+\targs.out_buffer = adapter->mbx_resp;\n+\targs.out_size = IDPF_DFLT_MBX_BUF_SIZE;\n+\terr = idpf_execute_vc_cmd(adapter, &args);\n+\tif (err)\n+\t\tPMD_DRV_LOG(ERR, \"Failed to execute command VIRTCHNL2_OP_DEALLOC_VECTORS\");\n+\n+\treturn err;\n+}\n+\n static int\n idpf_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,\n \t\t       uint32_t type, bool on)\n",
    "prefixes": [
        "v2",
        "12/14"
    ]
}