get:
Show a patch.

patch:
Update a patch (partial update: only the fields supplied are changed).

put:
Update a patch (full update: all writable fields are replaced).
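
A minimal sketch of fetching this patch programmatically, assuming the
Python "requests" library; the endpoint URL and the field names used here
are taken from the raw HTTP exchange and JSON body shown below.

    import requests

    # Fetch patch 97839 from the Patchwork REST API.
    resp = requests.get("http://patches.dpdk.org/api/patches/97839/")
    resp.raise_for_status()
    patch = resp.json()
    # Fields such as "name" and "state" appear in the JSON body below.
    print(patch["name"], patch["state"])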

GET /api/patches/97839/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 97839,
    "url": "http://patches.dpdk.org/api/patches/97839/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20210902175955.9202-4-apeksha.gupta@nxp.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20210902175955.9202-4-apeksha.gupta@nxp.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20210902175955.9202-4-apeksha.gupta@nxp.com",
    "date": "2021-09-02T17:59:53",
    "name": "[v2,3/5] net/enetfec: support queue configuration",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "5c23aee2e31d3f728c5a4fa2b57195cd237c5287",
    "submitter": {
        "id": 1570,
        "url": "http://patches.dpdk.org/api/people/1570/?format=api",
        "name": "Apeksha Gupta",
        "email": "apeksha.gupta@nxp.com"
    },
    "delegate": {
        "id": 319,
        "url": "http://patches.dpdk.org/api/users/319/?format=api",
        "username": "fyigit",
        "first_name": "Ferruh",
        "last_name": "Yigit",
        "email": "ferruh.yigit@amd.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20210902175955.9202-4-apeksha.gupta@nxp.com/mbox/",
    "series": [
        {
            "id": 18636,
            "url": "http://patches.dpdk.org/api/series/18636/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=18636",
            "date": "2021-09-02T17:59:50",
            "name": "drivers/net: add NXP ENETFEC driver",
            "version": 2,
            "mbox": "http://patches.dpdk.org/series/18636/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/97839/comments/",
    "check": "warning",
    "checks": "http://patches.dpdk.org/api/patches/97839/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id B1C44A0C4C;\n\tThu,  2 Sep 2021 20:01:16 +0200 (CEST)",
            "from [217.70.189.124] (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 2899A410DC;\n\tThu,  2 Sep 2021 20:01:05 +0200 (CEST)",
            "from inva021.nxp.com (inva021.nxp.com [92.121.34.21])\n by mails.dpdk.org (Postfix) with ESMTP id ECF1640DDE\n for <dev@dpdk.org>; Thu,  2 Sep 2021 20:01:03 +0200 (CEST)",
            "from inva021.nxp.com (localhost [127.0.0.1])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id C7C2E201F4D;\n Thu,  2 Sep 2021 20:01:03 +0200 (CEST)",
            "from aprdc01srsp001v.ap-rdc01.nxp.com\n (aprdc01srsp001v.ap-rdc01.nxp.com [165.114.16.16])\n by inva021.eu-rdc02.nxp.com (Postfix) with ESMTP id 64088201F51;\n Thu,  2 Sep 2021 20:01:03 +0200 (CEST)",
            "from lsv03186.swis.in-blr01.nxp.com (lsv03186.swis.in-blr01.nxp.com\n [92.120.146.182])\n by aprdc01srsp001v.ap-rdc01.nxp.com (Postfix) with ESMTP id 70777183AC89;\n Fri,  3 Sep 2021 02:01:02 +0800 (+08)"
        ],
        "From": "Apeksha Gupta <apeksha.gupta@nxp.com>",
        "To": "andrew.rybchenko@oktetlabs.ru,\n\tferruh.yigit@intel.com",
        "Cc": "dev@dpdk.org, hemant.agrawal@nxp.com, sachin.saxena@nxp.com,\n Apeksha Gupta <apeksha.gupta@nxp.com>",
        "Date": "Thu,  2 Sep 2021 23:29:53 +0530",
        "Message-Id": "<20210902175955.9202-4-apeksha.gupta@nxp.com>",
        "X-Mailer": "git-send-email 2.17.1",
        "In-Reply-To": "<20210902175955.9202-1-apeksha.gupta@nxp.com>",
        "References": "<20210902175955.9202-1-apeksha.gupta@nxp.com>",
        "X-Virus-Scanned": "ClamAV using ClamSMTP",
        "Subject": "[dpdk-dev] [PATCH v2 3/5] net/enetfec: support queue configuration",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "This patch adds Rx/Tx queue configuration setup operations.\nOn packet reception the respective BD Ring status bit is set\nwhich is then used for packet processing.\n\nSigned-off-by: Sachin Saxena <sachin.saxena@nxp.com>\nSigned-off-by: Apeksha Gupta <apeksha.gupta@nxp.com>\n---\n drivers/net/enetfec/enet_ethdev.c | 230 +++++++++++++++++++++++++++++-\n 1 file changed, 229 insertions(+), 1 deletion(-)",
    "diff": "diff --git a/drivers/net/enetfec/enet_ethdev.c b/drivers/net/enetfec/enet_ethdev.c\nindex 673361e3f8..b8bc4a5f8b 100644\n--- a/drivers/net/enetfec/enet_ethdev.c\n+++ b/drivers/net/enetfec/enet_ethdev.c\n@@ -46,6 +46,19 @@\n int enetfec_logtype_pmd;\n uint32_t e_cntl;\n \n+/* Supported Rx offloads */\n+static uint64_t dev_rx_offloads_sup =\n+\t\tDEV_RX_OFFLOAD_IPV4_CKSUM |\n+\t\tDEV_RX_OFFLOAD_UDP_CKSUM |\n+\t\tDEV_RX_OFFLOAD_TCP_CKSUM |\n+\t\tDEV_RX_OFFLOAD_VLAN_STRIP |\n+\t\tDEV_RX_OFFLOAD_CHECKSUM;\n+\n+static uint64_t dev_tx_offloads_sup =\n+\t\tDEV_TX_OFFLOAD_IPV4_CKSUM |\n+\t\tDEV_TX_OFFLOAD_UDP_CKSUM |\n+\t\tDEV_TX_OFFLOAD_TCP_CKSUM;\n+\n /*\n  * This function is called to start or restart the ENETFEC during a link\n  * change, transmit timeout, or to reconfigure the ENETFEC. The network\n@@ -214,10 +227,225 @@ enetfec_eth_stop(__rte_unused struct rte_eth_dev *dev)\n \treturn 0;\n }\n \n+static int\n+enetfec_eth_info(__rte_unused struct rte_eth_dev *dev,\n+\t     struct rte_eth_dev_info *dev_info)\n+{\n+\tdev_info->max_rx_queues = ENETFEC_MAX_Q;\n+\tdev_info->max_tx_queues = ENETFEC_MAX_Q;\n+\tdev_info->rx_offload_capa = dev_rx_offloads_sup;\n+\tdev_info->tx_offload_capa = dev_tx_offloads_sup;\n+\treturn 0;\n+}\n+\n+static const unsigned short offset_des_active_rxq[] = {\n+\tENETFEC_RDAR_0, ENETFEC_RDAR_1, ENETFEC_RDAR_2\n+};\n+\n+static const unsigned short offset_des_active_txq[] = {\n+\tENETFEC_TDAR_0, ENETFEC_TDAR_1, ENETFEC_TDAR_2\n+};\n+\n+static int\n+enetfec_tx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\tuint16_t queue_idx,\n+\t\t\tuint16_t nb_desc,\n+\t\t\tunsigned int socket_id __rte_unused,\n+\t\t\tconst struct rte_eth_txconf *tx_conf)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i;\n+\tstruct bufdesc *bdp, *bd_base;\n+\tstruct enetfec_priv_tx_q *txq;\n+\tunsigned int size;\n+\tunsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :\n+\t\t\tsizeof(struct bufdesc);\n+\tunsigned int dsize_log2 = fls64(dsize);\n+\n+\t/* Tx deferred start is not supported */\n+\tif (tx_conf->tx_deferred_start) {\n+\t\tENETFEC_PMD_ERR(\"%p:Tx deferred start not supported\",\n+\t\t\t\t(void *)dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* allocate transmit queue */\n+\ttxq = rte_zmalloc(NULL, sizeof(*txq), RTE_CACHE_LINE_SIZE);\n+\tif (txq == NULL) {\n+\t\tENETFEC_PMD_ERR(\"transmit queue allocation failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (nb_desc > MAX_TX_BD_RING_SIZE) {\n+\t\tnb_desc = MAX_TX_BD_RING_SIZE;\n+\t\tENETFEC_PMD_WARN(\"modified the nb_desc to MAX_TX_BD_RING_SIZE\\n\");\n+\t}\n+\ttxq->bd.ring_size = nb_desc;\n+\tfep->total_tx_ring_size += txq->bd.ring_size;\n+\tfep->tx_queues[queue_idx] = txq;\n+\n+\trte_write32(rte_cpu_to_le_32(fep->bd_addr_p_t[queue_idx]),\n+\t\tfep->hw_baseaddr_v + ENETFEC_TD_START(queue_idx));\n+\n+\t/* Set transmit descriptor base. 
*/\n+\ttxq = fep->tx_queues[queue_idx];\n+\ttxq->fep = fep;\n+\tsize = dsize * txq->bd.ring_size;\n+\tbd_base = (struct bufdesc *)fep->dma_baseaddr_t[queue_idx];\n+\ttxq->bd.que_id = queue_idx;\n+\ttxq->bd.base = bd_base;\n+\ttxq->bd.cur = bd_base;\n+\ttxq->bd.d_size = dsize;\n+\ttxq->bd.d_size_log2 = dsize_log2;\n+\ttxq->bd.active_reg_desc =\n+\t\t\tfep->hw_baseaddr_v + offset_des_active_txq[queue_idx];\n+\tbd_base = (struct bufdesc *)(((void *)bd_base) + size);\n+\ttxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);\n+\tbdp = txq->bd.base;\n+\tbdp = txq->bd.cur;\n+\n+\tfor (i = 0; i < txq->bd.ring_size; i++) {\n+\t\t/* Initialize the BD for every fragment in the page. */\n+\t\trte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);\n+\t\tif (txq->tx_mbuf[i] != NULL) {\n+\t\t\trte_pktmbuf_free(txq->tx_mbuf[i]);\n+\t\t\ttxq->tx_mbuf[i] = NULL;\n+\t\t}\n+\t\trte_write32(0, &bdp->bd_bufaddr);\n+\t\tbdp = enet_get_nextdesc(bdp, &txq->bd);\n+\t}\n+\n+\t/* Set the last buffer to wrap */\n+\tbdp = enet_get_prevdesc(bdp, &txq->bd);\n+\trte_write16((rte_cpu_to_le_16(TX_BD_WRAP) |\n+\t\t     rte_read16(&bdp->bd_sc)), &bdp->bd_sc);\n+\ttxq->dirty_tx = bdp;\n+\tdev->data->tx_queues[queue_idx] = fep->tx_queues[queue_idx];\n+\treturn 0;\n+}\n+\n+static int\n+enetfec_rx_queue_setup(struct rte_eth_dev *dev,\n+\t\t\tuint16_t queue_idx,\n+\t\t\tuint16_t nb_rx_desc,\n+\t\t\tunsigned int socket_id __rte_unused,\n+\t\t\tconst struct rte_eth_rxconf *rx_conf,\n+\t\t\tstruct rte_mempool *mb_pool)\n+{\n+\tstruct enetfec_private *fep = dev->data->dev_private;\n+\tunsigned int i;\n+\tstruct bufdesc *bd_base;\n+\tstruct bufdesc *bdp;\n+\tstruct enetfec_priv_rx_q *rxq;\n+\tunsigned int size;\n+\tunsigned int dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :\n+\t\t\tsizeof(struct bufdesc);\n+\tunsigned int dsize_log2 = fls64(dsize);\n+\n+\t/* Rx deferred start is not supported */\n+\tif (rx_conf->rx_deferred_start) {\n+\t\tENETFEC_PMD_ERR(\"%p:Rx deferred start not supported\",\n+\t\t\t\t(void *)dev);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* allocate receive queue */\n+\trxq = rte_zmalloc(NULL, sizeof(*rxq), RTE_CACHE_LINE_SIZE);\n+\tif (rxq == NULL) {\n+\t\tENETFEC_PMD_ERR(\"receive queue allocation failed\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tif (nb_rx_desc > MAX_RX_BD_RING_SIZE) {\n+\t\tnb_rx_desc = MAX_RX_BD_RING_SIZE;\n+\t\tENETFEC_PMD_WARN(\"modified the nb_desc to MAX_RX_BD_RING_SIZE\\n\");\n+\t}\n+\n+\trxq->bd.ring_size = nb_rx_desc;\n+\tfep->total_rx_ring_size += rxq->bd.ring_size;\n+\tfep->rx_queues[queue_idx] = rxq;\n+\n+\trte_write32(rte_cpu_to_le_32(fep->bd_addr_p_r[queue_idx]),\n+\t\t\tfep->hw_baseaddr_v + ENETFEC_RD_START(queue_idx));\n+\trte_write32(rte_cpu_to_le_32(PKT_MAX_BUF_SIZE),\n+\t\t\tfep->hw_baseaddr_v + ENETFEC_MRB_SIZE(queue_idx));\n+\n+\t/* Set receive descriptor base. 
*/\n+\trxq = fep->rx_queues[queue_idx];\n+\trxq->pool = mb_pool;\n+\tsize = dsize * rxq->bd.ring_size;\n+\tbd_base = (struct bufdesc *)fep->dma_baseaddr_r[queue_idx];\n+\trxq->bd.que_id = queue_idx;\n+\trxq->bd.base = bd_base;\n+\trxq->bd.cur = bd_base;\n+\trxq->bd.d_size = dsize;\n+\trxq->bd.d_size_log2 = dsize_log2;\n+\trxq->bd.active_reg_desc =\n+\t\t\tfep->hw_baseaddr_v + offset_des_active_rxq[queue_idx];\n+\tbd_base = (struct bufdesc *)(((void *)bd_base) + size);\n+\trxq->bd.last = (struct bufdesc *)(((void *)bd_base) - dsize);\n+\n+\trxq->fep = fep;\n+\tbdp = rxq->bd.base;\n+\trxq->bd.cur = bdp;\n+\n+\tfor (i = 0; i < nb_rx_desc; i++) {\n+\t\t/* Initialize Rx buffers from pktmbuf pool */\n+\t\tstruct rte_mbuf *mbuf = rte_pktmbuf_alloc(mb_pool);\n+\t\tif (mbuf == NULL) {\n+\t\t\tENETFEC_PMD_ERR(\"mbuf failed\\n\");\n+\t\t\tgoto err_alloc;\n+\t\t}\n+\n+\t\t/* Get the virtual address & physical address */\n+\t\trte_write32(rte_cpu_to_le_32(rte_pktmbuf_iova(mbuf)),\n+\t\t\t\t&bdp->bd_bufaddr);\n+\n+\t\trxq->rx_mbuf[i] = mbuf;\n+\t\trte_write16(rte_cpu_to_le_16(RX_BD_EMPTY), &bdp->bd_sc);\n+\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t}\n+\n+\t/* Initialize the receive buffer descriptors. */\n+\tbdp = rxq->bd.cur;\n+\tfor (i = 0; i < rxq->bd.ring_size; i++) {\n+\t\t/* Initialize the BD for every fragment in the page. */\n+\t\tif (rte_read32(&bdp->bd_bufaddr) > 0)\n+\t\t\trte_write16(rte_cpu_to_le_16(RX_BD_EMPTY),\n+\t\t\t\t&bdp->bd_sc);\n+\t\telse\n+\t\t\trte_write16(rte_cpu_to_le_16(0), &bdp->bd_sc);\n+\n+\t\tbdp = enet_get_nextdesc(bdp, &rxq->bd);\n+\t}\n+\n+\t/* Set the last buffer to wrap */\n+\tbdp = enet_get_prevdesc(bdp, &rxq->bd);\n+\trte_write16((rte_cpu_to_le_16(RX_BD_WRAP) |\n+\t\t     rte_read16(&bdp->bd_sc)),\t&bdp->bd_sc);\n+\tdev->data->rx_queues[queue_idx] = fep->rx_queues[queue_idx];\n+\trte_write32(0, fep->rx_queues[queue_idx]->bd.active_reg_desc);\n+\treturn 0;\n+\n+err_alloc:\n+\tfor (i = 0; i < nb_rx_desc; i++) {\n+\t\tif (rxq->rx_mbuf[i] != NULL) {\n+\t\t\trte_pktmbuf_free(rxq->rx_mbuf[i]);\n+\t\t\trxq->rx_mbuf[i] = NULL;\n+\t\t}\n+\t}\n+\trte_free(rxq);\n+\treturn errno;\n+}\n+\n static const struct eth_dev_ops enetfec_ops = {\n \t.dev_configure          = enetfec_eth_configure,\n \t.dev_start\t\t= enetfec_eth_start,\n-\t.dev_stop\t\t= enetfec_eth_stop\n+\t.dev_stop\t\t= enetfec_eth_stop,\n+\t.dev_infos_get          = enetfec_eth_info,\n+\t.rx_queue_setup\t\t= enetfec_rx_queue_setup,\n+\t.tx_queue_setup\t\t= enetfec_tx_queue_setup\n };\n \n static int\n",
    "prefixes": [
        "v2",
        "3/5"
    ]
}
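
A hedged sketch of the PATCH update advertised in the Allow header above.
Updating a patch requires an API token with maintainer rights on the
project; the token value and the target state below are placeholders, and
the "Authorization: Token ..." header follows Patchwork's token
authentication scheme.

    import requests

    # Change the patch's state via a partial update (PATCH).
    headers = {"Authorization": "Token <your-api-token>"}
    resp = requests.patch(
        "http://patches.dpdk.org/api/patches/97839/",
        headers=headers,
        json={"state": "accepted"},  # placeholder target state
    )
    resp.raise_for_status()
    print(resp.json()["state"])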