get:
Show a patch.

patch:
Partially update a patch; only the fields supplied in the request are changed.

put:
Update a patch, replacing all of its writable fields.
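
For reference, here is a minimal sketch of driving this endpoint from code, using Python's requests package (an assumed dependency; any HTTP client would do). It fetches the same JSON document shown in the raw exchange that follows:

import requests

# GET the patch as JSON; no authentication is needed for read access.
resp = requests.get("https://patches.dpdk.org/api/patches/59163/",
                    headers={"Accept": "application/json"})
resp.raise_for_status()
patch = resp.json()
print(patch["state"])   # "accepted"
print(patch["mbox"])    # mbox URL, suitable for piping into 'git am'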

GET /api/patches/59163/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 59163,
    "url": "https://patches.dpdk.org/api/patches/59163/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20190912152416.2990-3-shshaikh@marvell.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20190912152416.2990-3-shshaikh@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20190912152416.2990-3-shshaikh@marvell.com",
    "date": "2019-09-12T15:24:13",
    "name": "[v2,2/5] net/qede: fix ovs-dpdk failure when using odd number of queues on 100Gb mode",
    "commit_ref": null,
    "pull_url": null,
    "state": "accepted",
    "archived": true,
    "hash": "05277e60a8b913ac423f0b948aeaf33bc82be42d",
    "submitter": {
        "id": 1210,
        "url": "https://patches.dpdk.org/api/people/1210/?format=api",
        "name": "Shahed Shaikh",
        "email": "shshaikh@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "https://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20190912152416.2990-3-shshaikh@marvell.com/mbox/",
    "series": [
        {
            "id": 6390,
            "url": "https://patches.dpdk.org/api/series/6390/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=6390",
            "date": "2019-09-12T15:24:11",
            "name": "net/qede: fixes and enhancement",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/6390/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/59163/comments/",
    "check": "success",
    "checks": "https://patches.dpdk.org/api/patches/59163/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@dpdk.org",
        "Delivered-To": "patchwork@dpdk.org",
        "Received": [
            "from [92.243.14.124] (localhost [127.0.0.1])\n\tby dpdk.org (Postfix) with ESMTP id 1BE341EB0A;\n\tThu, 12 Sep 2019 17:24:42 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0a-0016f401.pphosted.com\n\t[67.231.148.174]) by dpdk.org (Postfix) with ESMTP id 776A11EAD6;\n\tThu, 12 Sep 2019 17:24:40 +0200 (CEST)",
            "from pps.filterd (m0045849.ppops.net [127.0.0.1])\n\tby mx0a-0016f401.pphosted.com (8.16.0.42/8.16.0.42) with SMTP id\n\tx8CFKPPq022298; Thu, 12 Sep 2019 08:24:39 -0700",
            "from sc-exch01.marvell.com ([199.233.58.181])\n\tby mx0a-0016f401.pphosted.com with ESMTP id 2uxshkg0en-1\n\t(version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT); \n\tThu, 12 Sep 2019 08:24:39 -0700",
            "from SC-EXCH03.marvell.com (10.93.176.83) by SC-EXCH01.marvell.com\n\t(10.93.176.81) with Microsoft SMTP Server (TLS) id 15.0.1367.3;\n\tThu, 12 Sep 2019 08:24:37 -0700",
            "from maili.marvell.com (10.93.176.43) by SC-EXCH03.marvell.com\n\t(10.93.176.83) with Microsoft SMTP Server id 15.0.1367.3 via Frontend\n\tTransport; Thu, 12 Sep 2019 08:24:37 -0700",
            "from dut1171.mv.qlogic.com (unknown [10.112.88.18])\n\tby maili.marvell.com (Postfix) with ESMTP id 4B6273F703F;\n\tThu, 12 Sep 2019 08:24:37 -0700 (PDT)",
            "from dut1171.mv.qlogic.com (localhost [127.0.0.1])\n\tby dut1171.mv.qlogic.com (8.14.7/8.14.7) with ESMTP id x8CFObo9003067;\n\tThu, 12 Sep 2019 08:24:37 -0700",
            "(from root@localhost)\n\tby dut1171.mv.qlogic.com (8.14.7/8.14.7/Submit) id x8CFOb5q003066;\n\tThu, 12 Sep 2019 08:24:37 -0700"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n\th=from : to : cc :\n\tsubject : date : message-id : in-reply-to : references : mime-version\n\t: content-type; s=pfpt0818;\n\tbh=vMAS9CKpGPzBmZvHQbsisNHHizIuu5Gzs74uyey0fpY=; \n\tb=d4yFLD6PUb8PIh9F+leZcmfOW/KM4PsCvIhgkHMAIUMZS6XW03Am++ZiZrh0WFwSv/bW\n\tJzFQmEVfMAKuETbuYfPbgL2tt8+lU6V+wRzFSB9rkzN6IxzFe79wGYGpD+xw/xSi2KFC\n\tno1lIMmBP3+a1wRL0lxweAxwhFkW3CBRxFbDm2vVJE5q1QRN24X3RetjnPFQ2kwUNaxq\n\tYBtKOOAYQ8ZCLNPfNzgHkUqf7SwZHQE7s/HFcX+P+yiC82XIvVWZSfc2EsrEtNXTrkHe\n\tnUEQKrJM+nvi4hbiqt2Ou6z3YwwYmDSpyL+LCT3kkQXzmQ2PHZEdTX+hhcZ3LBi9IcHK\n\tgg== ",
        "From": "Shahed Shaikh <shshaikh@marvell.com>",
        "To": "<dev@dpdk.org>",
        "CC": "<rmody@marvell.com>, <jerinj@marvell.com>,\n\t<GR-Everest-DPDK-Dev@marvell.com>, <stable@dpdk.org>",
        "Date": "Thu, 12 Sep 2019 08:24:13 -0700",
        "Message-ID": "<20190912152416.2990-3-shshaikh@marvell.com>",
        "X-Mailer": "git-send-email 2.12.0",
        "In-Reply-To": "<20190912152416.2990-1-shshaikh@marvell.com>",
        "References": "<20190912152416.2990-1-shshaikh@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-Virus-Version": "vendor=fsecure engine=2.50.10434:6.0.70,1.0.8\n\tdefinitions=2019-09-12_08:2019-09-11,2019-09-12 signatures=0",
        "Subject": "[dpdk-dev] [PATCH v2 2/5] net/qede: fix ovs-dpdk failure when using\n\todd number of queues on 100Gb mode",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.15",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n\t<mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n\t<mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org",
        "Sender": "\"dev\" <dev-bounces@dpdk.org>"
    },
    "content": "As per HW design of 100Gb mode, device internally uses 2 engines\n(eng0 and eng1), and both engines need to be configured symmetrically.\nBased on this requirement, driver design chose an approach\nto allow user to allocate only even number of queues and split\nthose queues on both engines equally.\n\nThis approach puts a limitation on number of queues to be allocated -\ni.e. user can't configure odd number of queues on 100Gb mode.\nOVS configures DPDK port with 1 rxq and 1 txq, which causes initialization\nof qede port to fail.\n\nIssue is fixed by changing the implementation of queue allocation and\nassignment to hw engines only for 100Gb devices and allowing user to\nconfigure odd number queues.\n\nNew approach works as below -\n- Create 'struct qede_fastpath_cmt' to hold hw queue pair of both engines\n  and provide it to rte_ethdev's Rx/Tx queues structure.\n- So ethdev will see only one queue for underlying queue pair created for\n  hw engine pair.\n- Install separate Rx/Tx data path handlers for 100Gb mode and regular mode\n- Rx/Tx handlers for 100Gb mode will split packet processing across both\n  engines by providing hw queue structures from 'struct qede_fastpath_cmt'\n  passed by Rx/Tx callbacks to respective engines.\n\nFixes: 2af14ca79c0a (\"net/qede: support 100G\")\nCc: stable@dpdk.org\n\nSigned-off-by: Shahed Shaikh <shshaikh@marvell.com>\n---\n drivers/net/qede/qede_ethdev.c | 112 ++++++++++++-----------\n drivers/net/qede/qede_ethdev.h |   5 +-\n drivers/net/qede/qede_filter.c |   5 +-\n drivers/net/qede/qede_rxtx.c   | 161 +++++++++++++++++++++++++++------\n drivers/net/qede/qede_rxtx.h   |  26 +++++-\n 5 files changed, 219 insertions(+), 90 deletions(-)",
    "diff": "diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c\nindex 528b33e8c..308588cb8 100644\n--- a/drivers/net/qede/qede_ethdev.c\n+++ b/drivers/net/qede/qede_ethdev.c\n@@ -304,6 +304,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)\n \n static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)\n {\n+\tstruct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;\n \tstruct ecore_dev *edev = QEDE_INIT_EDEV(qdev);\n \tunsigned int i = 0, j = 0, qid;\n \tunsigned int rxq_stat_cntrs, txq_stat_cntrs;\n@@ -311,12 +312,12 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)\n \n \tDP_VERBOSE(edev, ECORE_MSG_DEBUG, \"Clearing queue stats\\n\");\n \n-\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),\n+\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),\n \t\t\t       RTE_ETHDEV_QUEUE_STAT_CNTRS);\n-\ttxq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),\n+\ttxq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(dev),\n \t\t\t       RTE_ETHDEV_QUEUE_STAT_CNTRS);\n \n-\tfor_each_rss(qid) {\n+\tfor (qid = 0; qid < qdev->num_rx_queues; qid++) {\n \t\tOSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +\n \t\t\t     offsetof(struct qede_rx_queue, rcv_pkts), 0,\n \t\t\t    sizeof(uint64_t));\n@@ -342,7 +343,7 @@ static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)\n \n \ti = 0;\n \n-\tfor_each_tss(qid) {\n+\tfor (qid = 0; qid < qdev->num_tx_queues; qid++) {\n \t\ttxq = qdev->fp_array[qid].txq;\n \n \t\tOSAL_MEMSET((uint64_t *)(uintptr_t)\n@@ -991,7 +992,7 @@ int qede_config_rss(struct rte_eth_dev *eth_dev)\n \tfor (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {\n \t\tid = i / RTE_RETA_GROUP_SIZE;\n \t\tpos = i % RTE_RETA_GROUP_SIZE;\n-\t\tq = i % QEDE_RSS_COUNT(qdev);\n+\t\tq = i % QEDE_RSS_COUNT(eth_dev);\n \t\treta_conf[id].reta[pos] = q;\n \t}\n \tif (qede_rss_reta_update(eth_dev, &reta_conf[0],\n@@ -1165,22 +1166,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)\n \n \tPMD_INIT_FUNC_TRACE(edev);\n \n-\t/* Check requirements for 100G mode */\n-\tif (ECORE_IS_CMT(edev)) {\n-\t\tif (eth_dev->data->nb_rx_queues < 2 ||\n-\t\t    eth_dev->data->nb_tx_queues < 2) {\n-\t\t\tDP_ERR(edev, \"100G mode needs min. 2 RX/TX queues\\n\");\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\n-\t\tif ((eth_dev->data->nb_rx_queues % 2 != 0) ||\n-\t\t    (eth_dev->data->nb_tx_queues % 2 != 0)) {\n-\t\t\tDP_ERR(edev,\n-\t\t\t       \"100G mode needs even no. 
of RX/TX queues\\n\");\n-\t\t\treturn -EINVAL;\n-\t\t}\n-\t}\n-\n \t/* We need to have min 1 RX queue.There is no min check in\n \t * rte_eth_dev_configure(), so we are checking it here.\n \t */\n@@ -1207,8 +1192,9 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)\n \t\treturn -ENOTSUP;\n \n \tqede_dealloc_fp_resc(eth_dev);\n-\tqdev->num_tx_queues = eth_dev->data->nb_tx_queues;\n-\tqdev->num_rx_queues = eth_dev->data->nb_rx_queues;\n+\tqdev->num_tx_queues = eth_dev->data->nb_tx_queues * edev->num_hwfns;\n+\tqdev->num_rx_queues = eth_dev->data->nb_rx_queues * edev->num_hwfns;\n+\n \tif (qede_alloc_fp_resc(qdev))\n \t\treturn -ENOMEM;\n \n@@ -1233,7 +1219,12 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)\n \t\treturn ret;\n \n \tDP_INFO(edev, \"Device configured with RSS=%d TSS=%d\\n\",\n-\t\t\tQEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev));\n+\t\t\tQEDE_RSS_COUNT(eth_dev), QEDE_TSS_COUNT(eth_dev));\n+\n+\tif (ECORE_IS_CMT(edev))\n+\t\tDP_INFO(edev, \"Actual HW queues for CMT mode - RX = %d TX = %d\\n\",\n+\t\t\tqdev->num_rx_queues, qdev->num_tx_queues);\n+\n \n \treturn 0;\n }\n@@ -1275,6 +1266,10 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,\n \telse\n \t\tdev_info->max_rx_queues = (uint16_t)RTE_MIN(\n \t\t\tQEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);\n+\t/* Since CMT mode internally doubles the number of queues */\n+\tif (ECORE_IS_CMT(edev))\n+\t\tdev_info->max_rx_queues  = dev_info->max_rx_queues / 2;\n+\n \tdev_info->max_tx_queues = dev_info->max_rx_queues;\n \n \tdev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;\n@@ -1518,18 +1513,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)\n \teth_stats->oerrors = stats.common.tx_err_drop_pkts;\n \n \t/* Queue stats */\n-\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),\n+\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(eth_dev),\n \t\t\t       RTE_ETHDEV_QUEUE_STAT_CNTRS);\n-\ttxq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),\n+\ttxq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(eth_dev),\n \t\t\t       RTE_ETHDEV_QUEUE_STAT_CNTRS);\n-\tif ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||\n-\t    (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))\n+\tif (rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(eth_dev) ||\n+\t    txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(eth_dev))\n \t\tDP_VERBOSE(edev, ECORE_MSG_DEBUG,\n \t\t       \"Not all the queue stats will be displayed. 
Set\"\n \t\t       \" RTE_ETHDEV_QUEUE_STAT_CNTRS config param\"\n \t\t       \" appropriately and retry.\\n\");\n \n-\tfor_each_rss(qid) {\n+\tfor (qid = 0; qid < eth_dev->data->nb_rx_queues; qid++) {\n \t\teth_stats->q_ipackets[i] =\n \t\t\t*(uint64_t *)(\n \t\t\t\t((char *)(qdev->fp_array[qid].rxq)) +\n@@ -1549,7 +1544,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)\n \t\t\tbreak;\n \t}\n \n-\tfor_each_tss(qid) {\n+\tfor (qid = 0; qid < eth_dev->data->nb_tx_queues; qid++) {\n \t\ttxq = qdev->fp_array[qid].txq;\n \t\teth_stats->q_opackets[j] =\n \t\t\t*((uint64_t *)(uintptr_t)\n@@ -1566,18 +1561,18 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)\n \n static unsigned\n qede_get_xstats_count(struct qede_dev *qdev) {\n+\tstruct rte_eth_dev *dev = (struct rte_eth_dev *)qdev->ethdev;\n+\n \tif (ECORE_IS_BB(&qdev->edev))\n \t\treturn RTE_DIM(qede_xstats_strings) +\n \t\t       RTE_DIM(qede_bb_xstats_strings) +\n \t\t       (RTE_DIM(qede_rxq_xstats_strings) *\n-\t\t\tRTE_MIN(QEDE_RSS_COUNT(qdev),\n-\t\t\t\tRTE_ETHDEV_QUEUE_STAT_CNTRS));\n+\t\t\tQEDE_RSS_COUNT(dev) * qdev->edev.num_hwfns);\n \telse\n \t\treturn RTE_DIM(qede_xstats_strings) +\n \t\t       RTE_DIM(qede_ah_xstats_strings) +\n \t\t       (RTE_DIM(qede_rxq_xstats_strings) *\n-\t\t\tRTE_MIN(QEDE_RSS_COUNT(qdev),\n-\t\t\t\tRTE_ETHDEV_QUEUE_STAT_CNTRS));\n+\t\t\tQEDE_RSS_COUNT(dev));\n }\n \n static int\n@@ -1615,7 +1610,7 @@ qede_get_xstats_names(struct rte_eth_dev *dev,\n \t\t\t}\n \t\t}\n \n-\t\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),\n+\t\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),\n \t\t\t\t\t RTE_ETHDEV_QUEUE_STAT_CNTRS);\n \t\tfor (qid = 0; qid < rxq_stat_cntrs; qid++) {\n \t\t\tfor (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {\n@@ -1673,17 +1668,15 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,\n \t\t}\n \t}\n \n-\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),\n+\trxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(dev),\n \t\t\t\t RTE_ETHDEV_QUEUE_STAT_CNTRS);\n \tfor (qid = 0; qid < rxq_stat_cntrs; qid++) {\n-\t\tfor_each_rss(qid) {\n-\t\t\tfor (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {\n-\t\t\t\txstats[stat_idx].value = *(uint64_t *)(\n-\t\t\t\t\t((char *)(qdev->fp_array[qid].rxq)) +\n-\t\t\t\t\t qede_rxq_xstats_strings[i].offset);\n-\t\t\t\txstats[stat_idx].id = stat_idx;\n-\t\t\t\tstat_idx++;\n-\t\t\t}\n+\t\tfor (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) {\n+\t\t\txstats[stat_idx].value = *(uint64_t *)\n+\t\t\t\t(((char *)(qdev->fp_array[qid].rxq)) +\n+\t\t\t\t qede_rxq_xstats_strings[i].offset);\n+\t\t\txstats[stat_idx].id = stat_idx;\n+\t\t\tstat_idx++;\n \t\t}\n \t}\n \n@@ -1938,7 +1931,8 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)\n \t\tRTE_PTYPE_UNKNOWN\n \t};\n \n-\tif (eth_dev->rx_pkt_burst == qede_recv_pkts)\n+\tif (eth_dev->rx_pkt_burst == qede_recv_pkts ||\n+\t    eth_dev->rx_pkt_burst == qede_recv_pkts_cmt)\n \t\treturn ptypes;\n \n \treturn NULL;\n@@ -2005,7 +1999,7 @@ int qede_rss_hash_update(struct rte_eth_dev *eth_dev,\n \tvport_update_params.vport_id = 0;\n \t/* pass the L2 handles instead of qids */\n \tfor (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {\n-\t\tidx = i % QEDE_RSS_COUNT(qdev);\n+\t\tidx = i % QEDE_RSS_COUNT(eth_dev);\n \t\trss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;\n \t}\n \tvport_update_params.rss_params = &rss_params;\n@@ -2257,7 +2251,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \tqdev->mtu = mtu;\n \n \t/* Fix up 
RX buf size for all queues of the port */\n-\tfor_each_rss(i) {\n+\tfor (i = 0; i < qdev->num_rx_queues; i++) {\n \t\tfp = &qdev->fp_array[i];\n \t\tif (fp->rxq != NULL) {\n \t\t\tbufsz = (uint16_t)rte_pktmbuf_data_room_size(\n@@ -2286,9 +2280,13 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)\n \t/* update max frame size */\n \tdev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;\n \t/* Reassign back */\n-\tdev->rx_pkt_burst = qede_recv_pkts;\n-\tdev->tx_pkt_burst = qede_xmit_pkts;\n-\n+\tif (ECORE_IS_CMT(edev)) {\n+\t\tdev->rx_pkt_burst = qede_recv_pkts_cmt;\n+\t\tdev->tx_pkt_burst = qede_xmit_pkts_cmt;\n+\t} else {\n+\t\tdev->rx_pkt_burst = qede_recv_pkts;\n+\t\tdev->tx_pkt_burst = qede_xmit_pkts;\n+\t}\n \treturn 0;\n }\n \n@@ -2429,10 +2427,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \t\t pci_addr.bus, pci_addr.devid, pci_addr.function,\n \t\t eth_dev->data->port_id);\n \n-\teth_dev->rx_pkt_burst = qede_recv_pkts;\n-\teth_dev->tx_pkt_burst = qede_xmit_pkts;\n-\teth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;\n-\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY) {\n \t\tDP_ERR(edev, \"Skipping device init from secondary process\\n\");\n \t\treturn 0;\n@@ -2490,6 +2484,16 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)\n \tstrncpy((char *)params.name, QEDE_PMD_VER_PREFIX,\n \t\tQEDE_PMD_DRV_VER_STR_SIZE);\n \n+\tif (ECORE_IS_CMT(edev)) {\n+\t\teth_dev->rx_pkt_burst = qede_recv_pkts_cmt;\n+\t\teth_dev->tx_pkt_burst = qede_xmit_pkts_cmt;\n+\t} else {\n+\t\teth_dev->rx_pkt_burst = qede_recv_pkts;\n+\t\teth_dev->tx_pkt_burst = qede_xmit_pkts;\n+\t}\n+\n+\teth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;\n+\n \t/* For CMT mode device do periodic polling for slowpath events.\n \t * This is required since uio device uses only one MSI-x\n \t * interrupt vector but we need one for each engine.\ndiff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h\nindex d0e7c70be..5549d0bf3 100644\n--- a/drivers/net/qede/qede_ethdev.h\n+++ b/drivers/net/qede/qede_ethdev.h\n@@ -66,8 +66,8 @@\n \t\t\t\t\t(edev)->dev_info.num_tc)\n \n #define QEDE_QUEUE_CNT(qdev) ((qdev)->num_queues)\n-#define QEDE_RSS_COUNT(qdev) ((qdev)->num_rx_queues)\n-#define QEDE_TSS_COUNT(qdev) ((qdev)->num_tx_queues)\n+#define QEDE_RSS_COUNT(dev) ((dev)->data->nb_rx_queues)\n+#define QEDE_TSS_COUNT(dev) ((dev)->data->nb_tx_queues)\n \n #define QEDE_DUPLEX_FULL\t1\n #define QEDE_DUPLEX_HALF\t2\n@@ -215,6 +215,7 @@ struct qede_dev {\n \tstruct qed_dev_eth_info dev_info;\n \tstruct ecore_sb_info *sb_array;\n \tstruct qede_fastpath *fp_array;\n+\tstruct qede_fastpath_cmt *fp_array_cmt;\n \tuint16_t mtu;\n \tbool enable_tx_switching;\n \tbool rss_enable;\ndiff --git a/drivers/net/qede/qede_filter.c b/drivers/net/qede/qede_filter.c\nindex b3f62e0dd..81509f04b 100644\n--- a/drivers/net/qede/qede_filter.c\n+++ b/drivers/net/qede/qede_filter.c\n@@ -431,7 +431,7 @@ qede_fdir_filter_add(struct rte_eth_dev *eth_dev,\n \t\treturn -EINVAL;\n \t}\n \n-\tif (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {\n+\tif (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {\n \t\tDP_ERR(edev, \"invalid queue number %u\\n\",\n \t\t       fdir->action.rx_queue);\n \t\treturn -EINVAL;\n@@ -1345,7 +1345,6 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,\n \t\t\tstruct rte_flow_error *error,\n \t\t\tstruct rte_flow *flow)\n {\n-\tstruct qede_dev *qdev = QEDE_INIT_QDEV(dev);\n \tconst struct rte_flow_action_queue *queue;\n \n \tif (actions == NULL) {\n@@ 
-1360,7 +1359,7 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,\n \t\tcase RTE_FLOW_ACTION_TYPE_QUEUE:\n \t\t\tqueue = actions->conf;\n \n-\t\t\tif (queue->index >= QEDE_RSS_COUNT(qdev)) {\n+\t\t\tif (queue->index >= QEDE_RSS_COUNT(dev)) {\n \t\t\t\trte_flow_error_set(error, EINVAL,\n \t\t\t\t\t\t   RTE_FLOW_ERROR_TYPE_ACTION,\n \t\t\t\t\t\t   actions,\ndiff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c\nindex cb8ac9bf6..dbb74fc64 100644\n--- a/drivers/net/qede/qede_rxtx.c\n+++ b/drivers/net/qede/qede_rxtx.c\n@@ -260,13 +260,30 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,\n \n \tbufsz = rc;\n \n-\trxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,\n-\t\t\t\t      socket_id, mp, bufsz);\n-\tif (!rxq)\n-\t\treturn -ENOMEM;\n+\tif (ECORE_IS_CMT(edev)) {\n+\t\trxq = qede_alloc_rx_queue_mem(dev, qid * 2, nb_desc,\n+\t\t\t\t\t      socket_id, mp, bufsz);\n+\t\tif (!rxq)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqdev->fp_array[qid * 2].rxq = rxq;\n+\t\trxq = qede_alloc_rx_queue_mem(dev, qid * 2 + 1, nb_desc,\n+\t\t\t\t\t      socket_id, mp, bufsz);\n+\t\tif (!rxq)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqdev->fp_array[qid * 2 + 1].rxq = rxq;\n+\t\t/* provide per engine fp struct as rx queue */\n+\t\tdev->data->rx_queues[qid] = &qdev->fp_array_cmt[qid];\n+\t} else {\n+\t\trxq = qede_alloc_rx_queue_mem(dev, qid, nb_desc,\n+\t\t\t\t\t      socket_id, mp, bufsz);\n+\t\tif (!rxq)\n+\t\t\treturn -ENOMEM;\n \n-\tdev->data->rx_queues[qid] = rxq;\n-\tqdev->fp_array[qid].rxq = rxq;\n+\t\tdev->data->rx_queues[qid] = rxq;\n+\t\tqdev->fp_array[qid].rxq = rxq;\n+\t}\n \n \tDP_INFO(edev, \"rxq %d num_desc %u rx_buf_size=%u socket %u\\n\",\n \t\t  qid, nb_desc, rxq->rx_buf_size, socket_id);\n@@ -314,6 +331,7 @@ static void _qede_rx_queue_release(struct qede_dev *qdev,\n void qede_rx_queue_release(void *rx_queue)\n {\n \tstruct qede_rx_queue *rxq = rx_queue;\n+\tstruct qede_fastpath_cmt *fp_cmt;\n \tstruct qede_dev *qdev;\n \tstruct ecore_dev *edev;\n \n@@ -321,7 +339,13 @@ void qede_rx_queue_release(void *rx_queue)\n \t\tqdev = rxq->qdev;\n \t\tedev = QEDE_INIT_EDEV(qdev);\n \t\tPMD_INIT_FUNC_TRACE(edev);\n-\t\t_qede_rx_queue_release(qdev, edev, rxq);\n+\t\tif (ECORE_IS_CMT(edev)) {\n+\t\t\tfp_cmt = rx_queue;\n+\t\t\t_qede_rx_queue_release(qdev, edev, fp_cmt->fp0->rxq);\n+\t\t\t_qede_rx_queue_release(qdev, edev, fp_cmt->fp1->rxq);\n+\t\t} else {\n+\t\t\t_qede_rx_queue_release(qdev, edev, rxq);\n+\t\t}\n \t}\n }\n \n@@ -454,13 +478,30 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,\n \t\tdev->data->tx_queues[queue_idx] = NULL;\n \t}\n \n-\ttxq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,\n-\t\t\t\t      socket_id, tx_conf);\n-\tif (!txq)\n-\t\treturn -ENOMEM;\n+\tif (ECORE_IS_CMT(edev)) {\n+\t\ttxq = qede_alloc_tx_queue_mem(dev, queue_idx * 2, nb_desc,\n+\t\t\t\t\t      socket_id, tx_conf);\n+\t\tif (!txq)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqdev->fp_array[queue_idx * 2].txq = txq;\n+\t\ttxq = qede_alloc_tx_queue_mem(dev, (queue_idx * 2) + 1, nb_desc,\n+\t\t\t\t\t      socket_id, tx_conf);\n+\t\tif (!txq)\n+\t\t\treturn -ENOMEM;\n+\n+\t\tqdev->fp_array[(queue_idx * 2) + 1].txq = txq;\n+\t\tdev->data->tx_queues[queue_idx] =\n+\t\t\t\t\t&qdev->fp_array_cmt[queue_idx];\n+\t} else {\n+\t\ttxq = qede_alloc_tx_queue_mem(dev, queue_idx, nb_desc,\n+\t\t\t\t\t      socket_id, tx_conf);\n+\t\tif (!txq)\n+\t\t\treturn -ENOMEM;\n \n-\tdev->data->tx_queues[queue_idx] = txq;\n-\tqdev->fp_array[queue_idx].txq = txq;\n+\t\tdev->data->tx_queues[queue_idx] = txq;\n+\t\tqdev->fp_array[queue_idx].txq = 
txq;\n+\t}\n \n \treturn 0;\n }\n@@ -503,6 +544,7 @@ static void _qede_tx_queue_release(struct qede_dev *qdev,\n void qede_tx_queue_release(void *tx_queue)\n {\n \tstruct qede_tx_queue *txq = tx_queue;\n+\tstruct qede_fastpath_cmt *fp_cmt;\n \tstruct qede_dev *qdev;\n \tstruct ecore_dev *edev;\n \n@@ -510,7 +552,14 @@ void qede_tx_queue_release(void *tx_queue)\n \t\tqdev = txq->qdev;\n \t\tedev = QEDE_INIT_EDEV(qdev);\n \t\tPMD_INIT_FUNC_TRACE(edev);\n-\t\t_qede_tx_queue_release(qdev, edev, txq);\n+\n+\t\tif (ECORE_IS_CMT(edev)) {\n+\t\t\tfp_cmt = tx_queue;\n+\t\t\t_qede_tx_queue_release(qdev, edev, fp_cmt->fp0->txq);\n+\t\t\t_qede_tx_queue_release(qdev, edev, fp_cmt->fp1->txq);\n+\t\t} else {\n+\t\t\t_qede_tx_queue_release(qdev, edev, txq);\n+\t\t}\n \t}\n }\n \n@@ -548,6 +597,7 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)\n \tstruct qede_fastpath *fp;\n \tuint32_t num_sbs;\n \tuint16_t sb_idx;\n+\tint i;\n \n \tif (IS_VF(edev))\n \t\tecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);\n@@ -571,6 +621,28 @@ int qede_alloc_fp_resc(struct qede_dev *qdev)\n \tmemset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *\n \t\t\tsizeof(*qdev->fp_array));\n \n+\tif (ECORE_IS_CMT(edev)) {\n+\t\tqdev->fp_array_cmt = rte_calloc(\"fp_cmt\",\n+\t\t\t\t\t\tQEDE_RXTX_MAX(qdev) / 2,\n+\t\t\t\t\t\tsizeof(*qdev->fp_array_cmt),\n+\t\t\t\t\t\tRTE_CACHE_LINE_SIZE);\n+\n+\t\tif (!qdev->fp_array_cmt) {\n+\t\t\tDP_ERR(edev, \"fp array for CMT allocation failed\\n\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tmemset((void *)qdev->fp_array_cmt, 0,\n+\t\t       (QEDE_RXTX_MAX(qdev) / 2) * sizeof(*qdev->fp_array_cmt));\n+\n+\t\t/* Establish the mapping of fp_array with fp_array_cmt */\n+\t\tfor (i = 0; i < QEDE_RXTX_MAX(qdev) / 2; i++) {\n+\t\t\tqdev->fp_array_cmt[i].qdev = qdev;\n+\t\t\tqdev->fp_array_cmt[i].fp0 = &qdev->fp_array[i * 2];\n+\t\t\tqdev->fp_array_cmt[i].fp1 = &qdev->fp_array[i * 2 + 1];\n+\t\t}\n+\t}\n+\n \tfor (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {\n \t\tfp = &qdev->fp_array[sb_idx];\n \t\tif (!fp)\n@@ -635,6 +707,10 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)\n \tif (qdev->fp_array)\n \t\trte_free(qdev->fp_array);\n \tqdev->fp_array = NULL;\n+\n+\tif (qdev->fp_array_cmt)\n+\t\trte_free(qdev->fp_array_cmt);\n+\tqdev->fp_array_cmt = NULL;\n }\n \n static inline void\n@@ -686,9 +762,9 @@ qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)\n \tint hwfn_index;\n \tint rc;\n \n-\tif (rx_queue_id < eth_dev->data->nb_rx_queues) {\n+\tif (rx_queue_id < qdev->num_rx_queues) {\n \t\tfp = &qdev->fp_array[rx_queue_id];\n-\t\trxq = eth_dev->data->rx_queues[rx_queue_id];\n+\t\trxq = fp->rxq;\n \t\t/* Allocate buffers for the Rx ring */\n \t\tfor (j = 0; j < rxq->nb_rx_desc; j++) {\n \t\t\trc = qede_alloc_rx_buffer(rxq);\n@@ -757,9 +833,9 @@ qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n \tint hwfn_index;\n \tint rc;\n \n-\tif (tx_queue_id < eth_dev->data->nb_tx_queues) {\n-\t\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\tif (tx_queue_id < qdev->num_tx_queues) {\n \t\tfp = &qdev->fp_array[tx_queue_id];\n+\t\ttxq = fp->txq;\n \t\tmemset(&params, 0, sizeof(params));\n \t\tparams.queue_id = tx_queue_id / edev->num_hwfns;\n \t\tparams.vport_id = 0;\n@@ -900,8 +976,8 @@ static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)\n \tint hwfn_index;\n \tint rc;\n \n-\tif (tx_queue_id < eth_dev->data->nb_tx_queues) {\n-\t\ttxq = eth_dev->data->tx_queues[tx_queue_id];\n+\tif (tx_queue_id < qdev->num_tx_queues) 
{\n+\t\ttxq = qdev->fp_array[tx_queue_id].txq;\n \t\t/* Drain txq */\n \t\tif (qede_drain_txq(qdev, txq, true))\n \t\t\treturn -1; /* For the lack of retcodes */\n@@ -932,13 +1008,13 @@ int qede_start_queues(struct rte_eth_dev *eth_dev)\n \tuint8_t id;\n \tint rc = -1;\n \n-\tfor_each_rss(id) {\n+\tfor (id = 0; id < qdev->num_rx_queues; id++) {\n \t\trc = qede_rx_queue_start(eth_dev, id);\n \t\tif (rc != ECORE_SUCCESS)\n \t\t\treturn -1;\n \t}\n \n-\tfor_each_tss(id) {\n+\tfor (id = 0; id < qdev->num_tx_queues; id++) {\n \t\trc = qede_tx_queue_start(eth_dev, id);\n \t\tif (rc != ECORE_SUCCESS)\n \t\t\treturn -1;\n@@ -953,13 +1029,11 @@ void qede_stop_queues(struct rte_eth_dev *eth_dev)\n \tuint8_t id;\n \n \t/* Stopping RX/TX queues */\n-\tfor_each_tss(id) {\n+\tfor (id = 0; id < qdev->num_tx_queues; id++)\n \t\tqede_tx_queue_stop(eth_dev, id);\n-\t}\n \n-\tfor_each_rss(id) {\n+\tfor (id = 0; id < qdev->num_rx_queues; id++)\n \t\tqede_rx_queue_stop(eth_dev, id);\n-\t}\n }\n \n static inline bool qede_tunn_exist(uint16_t flag)\n@@ -1741,6 +1815,23 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n \treturn rx_pkt;\n }\n \n+uint16_t\n+qede_recv_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct qede_fastpath_cmt *fp_cmt = p_fp_cmt;\n+\tuint16_t eng0_pkts, eng1_pkts;\n+\n+\teng0_pkts = nb_pkts / 2;\n+\n+\teng0_pkts = qede_recv_pkts(fp_cmt->fp0->rxq, rx_pkts, eng0_pkts);\n+\n+\teng1_pkts = nb_pkts - eng0_pkts;\n+\n+\teng1_pkts = qede_recv_pkts(fp_cmt->fp1->rxq, rx_pkts + eng0_pkts,\n+\t\t\t\t   eng1_pkts);\n+\n+\treturn eng0_pkts + eng1_pkts;\n+}\n \n /* Populate scatter gather buffer descriptor fields */\n static inline uint16_t\n@@ -2263,6 +2354,24 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n \treturn nb_pkt_sent;\n }\n \n+uint16_t\n+qede_xmit_pkts_cmt(void *p_fp_cmt, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct qede_fastpath_cmt *fp_cmt = p_fp_cmt;\n+\tuint16_t eng0_pkts, eng1_pkts;\n+\n+\teng0_pkts = nb_pkts / 2;\n+\n+\teng0_pkts = qede_xmit_pkts(fp_cmt->fp0->txq, tx_pkts, eng0_pkts);\n+\n+\teng1_pkts = nb_pkts - eng0_pkts;\n+\n+\teng1_pkts = qede_xmit_pkts(fp_cmt->fp1->txq, tx_pkts + eng0_pkts,\n+\t\t\t\t   eng1_pkts);\n+\n+\treturn eng0_pkts + eng1_pkts;\n+}\n+\n uint16_t\n qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,\n \t\t     __rte_unused struct rte_mbuf **pkts,\ndiff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h\nindex 41a5f0f5c..75cc930fd 100644\n--- a/drivers/net/qede/qede_rxtx.h\n+++ b/drivers/net/qede/qede_rxtx.h\n@@ -81,10 +81,8 @@\n \t\t\t\t ETH_RSS_VXLAN\t\t\t|\\\n \t\t\t\t ETH_RSS_GENEVE)\n \n-#define for_each_rss(i)\t\tfor (i = 0; i < qdev->num_rx_queues; i++)\n-#define for_each_tss(i)\t\tfor (i = 0; i < qdev->num_tx_queues; i++)\n #define QEDE_RXTX_MAX(qdev) \\\n-\t(RTE_MAX(QEDE_RSS_COUNT(qdev), QEDE_TSS_COUNT(qdev)))\n+\t(RTE_MAX(qdev->num_rx_queues, qdev->num_tx_queues))\n \n /* Macros for non-tunnel packet types lkup table */\n #define QEDE_PKT_TYPE_UNKNOWN\t\t\t\t0x0\n@@ -179,6 +177,8 @@ struct qede_agg_info {\n  * Structure associated with each RX queue.\n  */\n struct qede_rx_queue {\n+\t/* Always keep qdev as first member */\n+\tstruct qede_dev *qdev;\n \tstruct rte_mempool *mb_pool;\n \tstruct ecore_chain rx_bd_ring;\n \tstruct ecore_chain rx_comp_ring;\n@@ -199,7 +199,6 @@ struct qede_rx_queue {\n \tuint64_t rx_hw_errors;\n \tuint64_t rx_alloc_errors;\n \tstruct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];\n-\tstruct qede_dev 
*qdev;\n \tvoid *handle;\n };\n \n@@ -217,6 +216,8 @@ union db_prod {\n };\n \n struct qede_tx_queue {\n+\t/* Always keep qdev as first member */\n+\tstruct qede_dev *qdev;\n \tstruct ecore_chain tx_pbl;\n \tstruct qede_tx_entry *sw_tx_ring;\n \tuint16_t nb_tx_desc;\n@@ -231,7 +232,6 @@ struct qede_tx_queue {\n \tuint16_t port_id;\n \tuint64_t xmit_pkts;\n \tbool is_legacy;\n-\tstruct qede_dev *qdev;\n \tvoid *handle;\n };\n \n@@ -241,6 +241,18 @@ struct qede_fastpath {\n \tstruct qede_tx_queue *txq;\n };\n \n+/* This structure holds the inforation of fast path queues\n+ * belonging to individual engines in CMT mode.\n+ */\n+struct qede_fastpath_cmt {\n+\t/* Always keep this a first element */\n+\tstruct qede_dev *qdev;\n+\t/* fastpath info of engine 0 */\n+\tstruct qede_fastpath *fp0;\n+\t/* fastpath info of engine 1 */\n+\tstruct qede_fastpath *fp1;\n+};\n+\n /*\n  * RX/TX function prototypes\n  */\n@@ -261,12 +273,16 @@ void qede_tx_queue_release(void *tx_queue);\n \n uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,\n \t\t\tuint16_t nb_pkts);\n+uint16_t qede_xmit_pkts_cmt(void *p_txq, struct rte_mbuf **tx_pkts,\n+\t\t\t    uint16_t nb_pkts);\n \n uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,\n \t\t\t     uint16_t nb_pkts);\n \n uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,\n \t\t\tuint16_t nb_pkts);\n+uint16_t qede_recv_pkts_cmt(void *p_rxq, struct rte_mbuf **rx_pkts,\n+\t\t\t    uint16_t nb_pkts);\n \n uint16_t qede_rxtx_pkts_dummy(void *p_rxq,\n \t\t\t      struct rte_mbuf **pkts,\n",
    "prefixes": [
        "v2",
        "2/5"
    ]
}
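
The PATCH and PUT verbs listed at the top of this page require an authenticated account with appropriate rights on the project. Below is a hedged sketch of a partial update, assuming Patchwork's token authentication header (Authorization: Token <token>); the token value and the new state are placeholders chosen for illustration, not an instruction to change this patch:

import requests

TOKEN = "0123456789abcdef"  # placeholder, not a real API token

# PATCH performs a partial update: only the fields named in the body change.
resp = requests.patch(
    "https://patches.dpdk.org/api/patches/59163/",
    headers={"Authorization": "Token " + TOKEN},
    json={"state": "superseded", "archived": False},
)
resp.raise_for_status()
print(resp.json()["state"])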