get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch in full (all writable fields are replaced).

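For example, a minimal Python sketch of driving these operations with the requests library (a hedged illustration, not part of this page: the token value and target state are placeholders, and Patchwork normally requires an authenticated API token with maintainer rights for PUT/PATCH):

import requests

API = "http://patches.dpdk.org/api/patches/130177/"

# GET: read the patch; no authentication is needed.
patch = requests.get(API).json()
print(patch["name"], patch["state"])

# PATCH: partially update the patch, e.g. move it to another state.
# "EXAMPLE_TOKEN" is a placeholder for a real Patchwork API token.
resp = requests.patch(
    API,
    headers={"Authorization": "Token EXAMPLE_TOKEN"},
    json={"state": "accepted"},
)
resp.raise_for_status()
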
GET /api/patches/130177/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 130177,
    "url": "http://patches.dpdk.org/api/patches/130177/?format=api",
    "web_url": "http://patches.dpdk.org/project/dpdk/patch/20230811163419.165790-7-hkalra@marvell.com/",
    "project": {
        "id": 1,
        "url": "http://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230811163419.165790-7-hkalra@marvell.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230811163419.165790-7-hkalra@marvell.com",
    "date": "2023-08-11T16:34:16",
    "name": "[6/9] net/cnxk: representor ethdev ops",
    "commit_ref": null,
    "pull_url": null,
    "state": "changes-requested",
    "archived": true,
    "hash": "f0b1d2f89c67d259a856301b502ad5288f4f60fa",
    "submitter": {
        "id": 1182,
        "url": "http://patches.dpdk.org/api/people/1182/?format=api",
        "name": "Harman Kalra",
        "email": "hkalra@marvell.com"
    },
    "delegate": {
        "id": 310,
        "url": "http://patches.dpdk.org/api/users/310/?format=api",
        "username": "jerin",
        "first_name": "Jerin",
        "last_name": "Jacob",
        "email": "jerinj@marvell.com"
    },
    "mbox": "http://patches.dpdk.org/project/dpdk/patch/20230811163419.165790-7-hkalra@marvell.com/mbox/",
    "series": [
        {
            "id": 29193,
            "url": "http://patches.dpdk.org/api/series/29193/?format=api",
            "web_url": "http://patches.dpdk.org/project/dpdk/list/?series=29193",
            "date": "2023-08-11T16:34:10",
            "name": "net/cnxk: support for port representors",
            "version": 1,
            "mbox": "http://patches.dpdk.org/series/29193/mbox/"
        }
    ],
    "comments": "http://patches.dpdk.org/api/patches/130177/comments/",
    "check": "success",
    "checks": "http://patches.dpdk.org/api/patches/130177/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id CF72E43036;\n\tFri, 11 Aug 2023 18:35:45 +0200 (CEST)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 9D2A94327B;\n\tFri, 11 Aug 2023 18:35:16 +0200 (CEST)",
            "from mx0b-0016f401.pphosted.com (mx0b-0016f401.pphosted.com\n [67.231.156.173])\n by mails.dpdk.org (Postfix) with ESMTP id 3489043273\n for <dev@dpdk.org>; Fri, 11 Aug 2023 18:35:14 +0200 (CEST)",
            "from pps.filterd (m0045851.ppops.net [127.0.0.1])\n by mx0b-0016f401.pphosted.com (8.17.1.19/8.17.1.19) with ESMTP id\n 37BEGHYK021465 for <dev@dpdk.org>; Fri, 11 Aug 2023 09:35:13 -0700",
            "from dc5-exch02.marvell.com ([199.233.59.182])\n by mx0b-0016f401.pphosted.com (PPS) with ESMTPS id 3sd8ya2quj-1\n (version=TLSv1.2 cipher=ECDHE-RSA-AES256-SHA384 bits=256 verify=NOT)\n for <dev@dpdk.org>; Fri, 11 Aug 2023 09:35:13 -0700",
            "from DC5-EXCH02.marvell.com (10.69.176.39) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server (TLS) id 15.0.1497.48;\n Fri, 11 Aug 2023 09:35:11 -0700",
            "from maili.marvell.com (10.69.176.80) by DC5-EXCH02.marvell.com\n (10.69.176.39) with Microsoft SMTP Server id 15.0.1497.48 via Frontend\n Transport; Fri, 11 Aug 2023 09:35:11 -0700",
            "from localhost.localdomain (unknown [10.29.52.211])\n by maili.marvell.com (Postfix) with ESMTP id 9A1973F7055;\n Fri, 11 Aug 2023 09:35:08 -0700 (PDT)"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/relaxed; d=marvell.com;\n h=from : to : cc :\n subject : date : message-id : in-reply-to : references : mime-version :\n content-type; s=pfpt0220; bh=HPBu6oQAif/lUxtbnZ7C5yu6r6XRwh43Uo48qhby5PY=;\n b=M1000LfmlA2Lu0dVPFJDbHf8g+eH+ud8BmojR1hkNIcxb6QJ7/oKcPq8DfWWiefygd2j\n PXOokNwinJC09bDYS0PRz8CI9+7UYWo0t2ojc5z1OGRyI/KvhBU6aciNZfp9KYw0E522\n Ec6XgQXsFIYJeBT9fakb/+L8uIx9vhiwzj8mLWdzw+1Y9a/0wx+PjYQZ8W5KY27EtWBI\n ecF7cHm11B1xNLuLVb6TnL6tPUM/oo5ckMhMrz7SBc/61HpKNHMxqSPZme1H/eg2kezx\n wjlxZowLwODW8WkFVigTGKcPyrAjyLOFIMZCsljsV2dUvzhISMcnKFrUpm7qUGu6TsFw ew==",
        "From": "Harman Kalra <hkalra@marvell.com>",
        "To": "<jerinj@marvell.com>, Nithin Dabilpuram <ndabilpuram@marvell.com>, \"Kiran\n Kumar K\" <kirankumark@marvell.com>, Sunil Kumar Kori <skori@marvell.com>,\n Satha Rao <skoteshwar@marvell.com>",
        "CC": "<dev@dpdk.org>, Harman Kalra <hkalra@marvell.com>",
        "Subject": "[PATCH 6/9] net/cnxk: representor ethdev ops",
        "Date": "Fri, 11 Aug 2023 22:04:16 +0530",
        "Message-ID": "<20230811163419.165790-7-hkalra@marvell.com>",
        "X-Mailer": "git-send-email 2.18.0",
        "In-Reply-To": "<20230811163419.165790-1-hkalra@marvell.com>",
        "References": "<20230811163419.165790-1-hkalra@marvell.com>",
        "MIME-Version": "1.0",
        "Content-Type": "text/plain",
        "X-Proofpoint-ORIG-GUID": "GY4kkK-xGf3KNFX3VkWJh8jJdHjsC0B6",
        "X-Proofpoint-GUID": "GY4kkK-xGf3KNFX3VkWJh8jJdHjsC0B6",
        "X-Proofpoint-Virus-Version": "vendor=baseguard\n engine=ICAP:2.0.267,Aquarius:18.0.957,Hydra:6.0.591,FMLib:17.11.176.26\n definitions=2023-08-11_08,2023-08-10_01,2023-05-22_02",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "Implementing ethernet device operation callbacks for\nport representors PMD\n\nSigned-off-by: Harman Kalra <hkalra@marvell.com>\n---\n drivers/net/cnxk/cnxk_rep.c     |  62 +--\n drivers/net/cnxk/cnxk_rep.h     |  36 ++\n drivers/net/cnxk/cnxk_rep_msg.h |  15 +\n drivers/net/cnxk/cnxk_rep_ops.c | 655 ++++++++++++++++++++++++++++++--\n 4 files changed, 713 insertions(+), 55 deletions(-)",
    "diff": "diff --git a/drivers/net/cnxk/cnxk_rep.c b/drivers/net/cnxk/cnxk_rep.c\nindex e6f5790adc..5ee7e93ab9 100644\n--- a/drivers/net/cnxk/cnxk_rep.c\n+++ b/drivers/net/cnxk/cnxk_rep.c\n@@ -13,6 +13,9 @@ struct eth_dev_ops cnxk_rep_dev_ops = {\n \t.rx_queue_release = cnxk_rep_rx_queue_release,\n \t.tx_queue_setup = cnxk_rep_tx_queue_setup,\n \t.tx_queue_release = cnxk_rep_tx_queue_release,\n+\t.promiscuous_enable   = cnxk_rep_promiscuous_enable,\n+\t.promiscuous_disable   = cnxk_rep_promiscuous_disable,\n+\t.mac_addr_set = cnxk_rep_mac_addr_set,\n \t.link_update = cnxk_rep_link_update,\n \t.dev_close = cnxk_rep_dev_close,\n \t.dev_stop = cnxk_rep_dev_stop,\n@@ -24,14 +27,36 @@ struct eth_dev_ops cnxk_rep_dev_ops = {\n int\n cnxk_rep_dev_uninit(struct rte_eth_dev *ethdev)\n {\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\n \tif (rte_eal_process_type() != RTE_PROC_PRIMARY)\n \t\treturn 0;\n \n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n \tplt_rep_dbg(\"Representor port:%d uninit\", ethdev->data->port_id);\n \trte_free(ethdev->data->mac_addrs);\n \tethdev->data->mac_addrs = NULL;\n \n+\trep_xport_vdev_cfg->nb_rep_ports--;\n+\t/* Once all representors are closed, cleanup rep base vdev config */\n+\tif (!rep_xport_vdev_cfg->nb_rep_ports) {\n+\t\tplt_free(rep_xport_vdev_cfg->q_bmap_mem);\n+\t\tplt_free(rep_xport_vdev_cfg->mdevinfo);\n+\t\tplt_memzone_free(mz);\n+\t}\n+\n \treturn 0;\n+fail:\n+\treturn rte_errno;\n }\n \n int\n@@ -121,26 +146,6 @@ cnxk_init_rep_internal(struct cnxk_eth_dev *pf_dev)\n \treturn rc;\n }\n \n-static uint16_t\n-cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n-{\n-\tPLT_SET_USED(tx_queue);\n-\tPLT_SET_USED(tx_pkts);\n-\tPLT_SET_USED(nb_pkts);\n-\n-\treturn 0;\n-}\n-\n-static uint16_t\n-cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n-{\n-\tPLT_SET_USED(rx_queue);\n-\tPLT_SET_USED(rx_pkts);\n-\tPLT_SET_USED(nb_pkts);\n-\n-\treturn 0;\n-}\n-\n static int\n cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)\n {\n@@ -152,6 +157,11 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)\n \trep_dev->vf_id = rep_params->vf_id;\n \trep_dev->switch_domain_id = rep_params->switch_domain_id;\n \trep_dev->parent_dev = rep_params->parent_dev;\n+\trep_dev->u.rxq = UINT16_MAX;\n+\trep_dev->u.txq = UINT16_MAX;\n+\n+\tpf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);\n+\trep_dev->rep_xport_vdev = pf_dev->rep_xport_vdev;\n \n \teth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;\n \teth_dev->data->representor_id = rep_params->vf_id;\n@@ -170,11 +180,10 @@ cnxk_rep_dev_init(struct rte_eth_dev *eth_dev, void *params)\n \teth_dev->dev_ops = &cnxk_rep_dev_ops;\n \n \t/* Rx/Tx functions stubs to avoid crashing */\n-\teth_dev->rx_pkt_burst = cnxk_rep_rx_burst;\n-\teth_dev->tx_pkt_burst = cnxk_rep_tx_burst;\n+\teth_dev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;\n+\teth_dev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;\n \n \t/* Link state. 
Inherited from PF */\n-\tpf_dev = cnxk_eth_pmd_priv(rep_dev->parent_dev);\n \tlink = &pf_dev->eth_dev->data->dev_link;\n \n \teth_dev->data->dev_link.link_speed = link->link_speed;\n@@ -325,13 +334,6 @@ cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev\n \t\tgoto err;\n \t}\n \n-\t/* Launch a thread to handle control messages */\n-\trc = cnxk_rep_control_thread_launch(pf_dev);\n-\tif (rc) {\n-\t\tplt_err(\"Failed to launch message ctrl thread\");\n-\t\tgoto err;\n-\t}\n-\n \treturn 0;\n err:\n \treturn rc;\ndiff --git a/drivers/net/cnxk/cnxk_rep.h b/drivers/net/cnxk/cnxk_rep.h\nindex 8825fa1cf2..2b6403f003 100644\n--- a/drivers/net/cnxk/cnxk_rep.h\n+++ b/drivers/net/cnxk/cnxk_rep.h\n@@ -6,6 +6,7 @@\n #ifndef __CNXK_REP_H__\n #define __CNXK_REP_H__\n \n+#define CNXK_REP_XPORT_VDEV_CFG_MZ  \"rep_xport_vdev_cfg\"\n #define CNXK_REP_XPORT_VDEV_DEVARGS \"role=server\"\n #define CNXK_REP_XPORT_VDEV_NAME\t   \"net_memif\"\n #define CNXK_REP_VDEV_CTRL_QUEUE   0\n@@ -14,6 +15,18 @@\n /* Common ethdev ops */\n extern struct eth_dev_ops cnxk_rep_dev_ops;\n \n+/* Representor base device configurations */\n+typedef struct rep_xport_vdev_cfg_s {\n+\tstruct plt_bitmap *q_map;\n+\tvoid *q_bmap_mem;\n+\tuint8_t nb_rep_ports;\n+\tuint8_t nb_rep_started;\n+\tstruct rte_mempool *ctrl_chan_pool;\n+\tstruct rte_eth_dev_info *mdevinfo;\n+\tbool rep_xport_configured;\n+} rep_xport_vdev_cfg_t;\n+\n+/* Representor port configurations */\n struct cnxk_rep_dev {\n \tuint16_t vf_id;\n \tuint16_t switch_domain_id;\n@@ -22,15 +35,33 @@ struct cnxk_rep_dev {\n \tuint16_t rep_xport_vdev;\n \tbool is_vf_active;\n \tuint16_t pf_func;\n+\tunion {\n+\t\tuint16_t rxq;\n+\t\tuint16_t txq;\n+\t\tuint16_t rep_portid;\n+\t} u;\n \tuint8_t mac_addr[RTE_ETHER_ADDR_LEN];\n };\n \n+/* Inline functions */\n static inline struct cnxk_rep_dev *\n cnxk_rep_pmd_priv(const struct rte_eth_dev *eth_dev)\n {\n \treturn eth_dev->data->dev_private;\n }\n \n+static inline struct rte_eth_dev *\n+cnxk_rep_xport_eth_dev(uint16_t portid)\n+{\n+\tif (!rte_eth_dev_is_valid_port(portid)) {\n+\t\tplt_err(\"Invalid port_id=%u\", portid);\n+\t\treturn NULL;\n+\t}\n+\n+\treturn &rte_eth_devices[portid];\n+}\n+\n+/* Prototypes */\n int cnxk_rep_dev_probe(struct rte_pci_device *pci_dev, struct rte_eth_dev *pf_ethdev,\n \t\t       struct rte_eth_devargs *eth_da);\n int cnxk_rep_dev_remove(struct rte_eth_dev *pf_ethdev);\n@@ -52,5 +83,10 @@ int cnxk_rep_dev_close(struct rte_eth_dev *eth_dev);\n int cnxk_rep_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats);\n int cnxk_rep_stats_reset(struct rte_eth_dev *eth_dev);\n int cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **ops);\n+int cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev);\n+int cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev);\n+int cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr);\n+uint16_t cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);\n+uint16_t cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts);\n \n #endif /* __CNXK_REP_H__ */\ndiff --git a/drivers/net/cnxk/cnxk_rep_msg.h b/drivers/net/cnxk/cnxk_rep_msg.h\nindex a28c63f762..554122d7f8 100644\n--- a/drivers/net/cnxk/cnxk_rep_msg.h\n+++ b/drivers/net/cnxk/cnxk_rep_msg.h\n@@ -19,6 +19,10 @@ typedef enum CNXK_REP_MSG {\n \tCNXK_REP_MSG_READY = 0,\n \tCNXK_REP_MSG_ACK,\n \tCNXK_REP_MSG_EXIT,\n+\t/* Ethernet operation msgs 
*/\n+\tCNXK_REP_MSG_ETH_SET_MAC,\n+\tCNXK_REP_MSG_ETH_STATS_GET,\n+\tCNXK_REP_MSG_ETH_STATS_CLEAR,\n \t/* End of messaging sequence */\n \tCNXK_REP_MSG_END,\n } cnxk_rep_msg_t;\n@@ -64,6 +68,17 @@ typedef struct cnxk_rep_msg_exit_data {\n \tuint8_t val;\n } __rte_packed cnxk_rep_msg_exit_data_t;\n \n+/* Ethernet op - set mac */\n+typedef struct cnxk_rep_msg_eth_mac_set_meta {\n+\tuint16_t portid;\n+\tuint8_t addr_bytes[RTE_ETHER_ADDR_LEN];\n+} __rte_packed cnxk_rep_msg_eth_set_mac_meta_t;\n+\n+/* Ethernet op - get/clear stats */\n+typedef struct cnxk_rep_msg_eth_stats_meta {\n+\tuint16_t portid;\n+} __rte_packed cnxk_rep_msg_eth_stats_meta_t;\n+\n void cnxk_rep_msg_populate_command(void *buffer, uint32_t *length, cnxk_rep_msg_t type,\n \t\t\t\t   uint32_t size);\n void cnxk_rep_msg_populate_command_meta(void *buffer, uint32_t *length, void *msg_meta, uint32_t sz,\ndiff --git a/drivers/net/cnxk/cnxk_rep_ops.c b/drivers/net/cnxk/cnxk_rep_ops.c\nindex 3f1aab077b..022a5137df 100644\n--- a/drivers/net/cnxk/cnxk_rep_ops.c\n+++ b/drivers/net/cnxk/cnxk_rep_ops.c\n@@ -3,6 +3,54 @@\n  */\n \n #include <cnxk_rep.h>\n+#include <cnxk_rep_msg.h>\n+\n+#define MEMPOOL_CACHE_SIZE 256\n+#define TX_DESC_PER_QUEUE  512\n+#define RX_DESC_PER_QUEUE  256\n+#define NB_REP_VDEV_MBUF   1024\n+\n+static uint16_t\n+cnxk_rep_tx_burst(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct cnxk_rep_dev *rep_dev = tx_queue;\n+\n+\tnb_pkts = rte_eth_tx_burst(rep_dev->rep_xport_vdev, rep_dev->u.txq, tx_pkts, nb_pkts);\n+\n+\treturn nb_pkts;\n+}\n+\n+static uint16_t\n+cnxk_rep_rx_burst(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tstruct cnxk_rep_dev *rep_dev = rx_queue;\n+\n+\tnb_pkts = rte_eth_rx_burst(rep_dev->rep_xport_vdev, rep_dev->u.txq, rx_pkts, 32);\n+\tif (nb_pkts == 0)\n+\t\treturn 0;\n+\n+\treturn nb_pkts;\n+}\n+\n+uint16_t\n+cnxk_rep_tx_burst_dummy(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)\n+{\n+\tPLT_SET_USED(tx_queue);\n+\tPLT_SET_USED(tx_pkts);\n+\tPLT_SET_USED(nb_pkts);\n+\n+\treturn 0;\n+}\n+\n+uint16_t\n+cnxk_rep_rx_burst_dummy(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)\n+{\n+\tPLT_SET_USED(rx_queue);\n+\tPLT_SET_USED(rx_pkts);\n+\tPLT_SET_USED(nb_pkts);\n+\n+\treturn 0;\n+}\n \n int\n cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)\n@@ -13,39 +61,379 @@ cnxk_rep_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)\n }\n \n int\n-cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *devinfo)\n+cnxk_rep_dev_info_get(struct rte_eth_dev *ethdev, struct rte_eth_dev_info *dev_info)\n {\n-\tPLT_SET_USED(ethdev);\n-\tPLT_SET_USED(devinfo);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tstruct rte_eth_dev_info mdevinfo;\n+\tconst struct plt_memzone *mz;\n+\tint rc = 0;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tmz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,\n+\t\t\t\t\t\t     sizeof(rep_xport_vdev_cfg_t));\n+\t\tif (!mz) {\n+\t\t\tplt_err(\"Failed to reserve a memzone, rep id %d, err %d\",\n+\t\t\t\trep_dev->vf_id, rte_errno);\n+\t\t\tgoto fail;\n+\t\t}\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\t/* Get the rep base vdev devinfo */\n+\tif (!rep_xport_vdev_cfg->mdevinfo) {\n+\t\trc = rte_eth_dev_info_get(rep_dev->rep_xport_vdev, &mdevinfo);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to get rep_xport port dev info, err %d\", rc);\n+\t\t\tgoto 
fail;\n+\t\t}\n+\t\trep_xport_vdev_cfg->mdevinfo = plt_zmalloc(sizeof(struct rte_eth_dev_info), 0);\n+\t\tif (!rep_xport_vdev_cfg->mdevinfo) {\n+\t\t\tplt_err(\"Failed to alloc memory for dev info\");\n+\t\t\tgoto fail;\n+\t\t}\n+\t\trte_memcpy(rep_xport_vdev_cfg->mdevinfo, &mdevinfo,\n+\t\t\t   sizeof(struct rte_eth_dev_info));\n+\t}\n+\n+\t/* Use rep_xport device info */\n+\tdev_info->max_mac_addrs = rep_xport_vdev_cfg->mdevinfo->max_mac_addrs;\n+\tdev_info->max_rx_pktlen = rep_xport_vdev_cfg->mdevinfo->max_rx_pktlen;\n+\tdev_info->min_rx_bufsize = rep_xport_vdev_cfg->mdevinfo->min_rx_bufsize;\n+\tdev_info->tx_offload_capa = rep_xport_vdev_cfg->mdevinfo->tx_offload_capa;\n+\n+\t/* For the sake of symmetry, max_rx_queues = max_tx_queues */\n+\tdev_info->max_rx_queues = 1;\n+\tdev_info->max_tx_queues = 1;\n+\n+\t/* MTU specifics */\n+\tdev_info->max_mtu = rep_xport_vdev_cfg->mdevinfo->max_mtu;\n+\tdev_info->min_mtu = rep_xport_vdev_cfg->mdevinfo->min_mtu;\n+\n+\t/* Switch info specific */\n+\tdev_info->switch_info.name = ethdev->device->name;\n+\tdev_info->switch_info.domain_id = rep_dev->switch_domain_id;\n+\tdev_info->switch_info.port_id = rep_dev->vf_id;\n+\n \treturn 0;\n+fail:\n+\treturn rc;\n+}\n+\n+static inline int\n+bitmap_ctzll(uint64_t slab)\n+{\n+\tif (slab == 0)\n+\t\treturn 0;\n+\n+\treturn __builtin_ctzll(slab);\n+}\n+\n+static uint16_t\n+alloc_rep_xport_qid(struct plt_bitmap *bmp)\n+{\n+\tuint16_t idx, rc;\n+\tuint64_t slab;\n+\tuint32_t pos;\n+\n+\tpos = 0;\n+\tslab = 0;\n+\t/* Scan from the beginning */\n+\tplt_bitmap_scan_init(bmp);\n+\t/* Scan bitmap to get the free pool */\n+\trc = plt_bitmap_scan(bmp, &pos, &slab);\n+\t/* Empty bitmap */\n+\tif (rc == 0)\n+\t\treturn UINT16_MAX;\n+\n+\tidx = pos + bitmap_ctzll(slab);\n+\tplt_bitmap_clear(bmp, idx);\n+\treturn idx;\n+}\n+\n+static int\n+configure_rep_xport_queues_map(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg)\n+{\n+\tint id, rc = 0, q_max;\n+\tuint32_t bmap_sz;\n+\tvoid *bmap_mem;\n+\n+\tq_max = CNXK_MAX_REP_PORTS + 1;\n+\t/* Return success on no-pci case */\n+\tif (!q_max)\n+\t\treturn 0;\n+\n+\tbmap_sz = plt_bitmap_get_memory_footprint(q_max);\n+\n+\t/* Allocate memory for rep_xport queue bitmap */\n+\tbmap_mem = plt_zmalloc(bmap_sz, RTE_CACHE_LINE_SIZE);\n+\tif (bmap_mem == NULL) {\n+\t\tplt_err(\"Failed to allocate memory for worker lmt bmap\");\n+\t\trc = -ENOMEM;\n+\t\tgoto exit;\n+\t}\n+\trep_xport_vdev_cfg->q_bmap_mem = bmap_mem;\n+\n+\t/* Initialize worker lmt bitmap */\n+\trep_xport_vdev_cfg->q_map = plt_bitmap_init(q_max, bmap_mem, bmap_sz);\n+\tif (!rep_xport_vdev_cfg->q_map) {\n+\t\tplt_err(\"Failed to initialize rep_xport queue bitmap\");\n+\t\trc = -EIO;\n+\t\tgoto exit;\n+\t}\n+\n+\t/* Set all the queue initially */\n+\tfor (id = 0; id < q_max; id++)\n+\t\tplt_bitmap_set(rep_xport_vdev_cfg->q_bmap_mem, id);\n+\n+\treturn 0;\n+exit:\n+\treturn rc;\n+}\n+\n+static uint16_t\n+cnxk_rep_eth_dev_count_total(void)\n+{\n+\tuint16_t port, count = 0;\n+\tstruct rte_eth_dev *ethdev;\n+\n+\tRTE_ETH_FOREACH_DEV(port) {\n+\t\tethdev = &rte_eth_devices[port];\n+\t\tif (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)\n+\t\t\tcount++;\n+\t}\n+\n+\treturn count;\n+}\n+\n+static int\n+configure_control_channel(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)\n+{\n+\tstruct rte_mempool *ctrl_chan_pool = NULL;\n+\tint rc;\n+\n+\t/* Allocate a qid for control channel */\n+\talloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);\n+\n+\t/* Create the mbuf pool. 
*/\n+\tctrl_chan_pool = rte_pktmbuf_pool_create(\"rep_xport_ctrl_pool\", NB_REP_VDEV_MBUF,\n+\t\t\t\t\t\tMEMPOOL_CACHE_SIZE, RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t\tRTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());\n+\n+\tif (ctrl_chan_pool == NULL) {\n+\t\tplt_err(\"Cannot init mbuf pool\");\n+\t\trc = -ENOMEM;\n+\t\tgoto fail;\n+\t}\n+\n+\t/* Setup a RX queue for control channel */\n+\trc = rte_eth_rx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, RX_DESC_PER_QUEUE,\n+\t\t\t\t    rte_eth_dev_socket_id(portid), NULL, ctrl_chan_pool);\n+\tif (rc < 0) {\n+\t\tplt_err(\"rte_eth_rx_queue_setup:err=%d, port=%u\\n\", rc, portid);\n+\t\tgoto fail;\n+\t}\n+\n+\t/* Setup a TX queue for control channel */\n+\trc = rte_eth_tx_queue_setup(portid, CNXK_REP_VDEV_CTRL_QUEUE, TX_DESC_PER_QUEUE,\n+\t\t\t\t    rte_eth_dev_socket_id(portid), NULL);\n+\tif (rc < 0) {\n+\t\tplt_err(\"TX queue setup failed, err %d port %d\", rc, portid);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg->ctrl_chan_pool = ctrl_chan_pool;\n+\n+\treturn 0;\n+fail:\n+\treturn rc;\n+}\n+\n+static int\n+configure_rep_xport_dev(rep_xport_vdev_cfg_t *rep_xport_vdev_cfg, uint16_t portid)\n+{\n+\tstruct rte_eth_dev *rep_xport_ethdev = cnxk_rep_xport_eth_dev(portid);\n+\tstatic struct rte_eth_conf port_conf_default;\n+\tuint16_t nb_rxq, nb_txq, nb_rep_ports;\n+\tint rc = 0;\n+\n+\t/* If rep_xport port already started, stop it and reconfigure */\n+\tif (rep_xport_ethdev->data->dev_started)\n+\t\trte_eth_dev_stop(portid);\n+\n+\t/* Get the no of representors probed */\n+\tnb_rep_ports = cnxk_rep_eth_dev_count_total();\n+\tif (nb_rep_ports > CNXK_MAX_REP_PORTS) {\n+\t\tplt_err(\"Representors probed %d > Max supported %d\", nb_rep_ports,\n+\t\t\tCNXK_MAX_REP_PORTS);\n+\t\tgoto fail;\n+\t}\n+\n+\t/* Each queue of rep_xport describes representor port. 
1 additional queue is\n+\t * configured as control channel to configure flows, etc.\n+\t */\n+\tnb_rxq = CNXK_MAX_REP_PORTS + 1;\n+\tnb_txq = CNXK_MAX_REP_PORTS + 1;\n+\n+\trc = rte_eth_dev_configure(portid, nb_rxq, nb_txq, &port_conf_default);\n+\tif (rc) {\n+\t\tplt_err(\"Failed to configure rep_xport port: %d\", rc);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg->rep_xport_configured = true;\n+\trep_xport_vdev_cfg->nb_rep_ports = nb_rep_ports;\n+\n+\treturn 0;\n+fail:\n+\treturn rc;\n }\n \n int\n cnxk_rep_dev_configure(struct rte_eth_dev *ethdev)\n {\n-\tPLT_SET_USED(ethdev);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\tint rc = -1;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tmz = plt_memzone_reserve_cache_align(CNXK_REP_XPORT_VDEV_CFG_MZ,\n+\t\t\t\t\t\t     sizeof(rep_xport_vdev_cfg_t));\n+\t\tif (!mz) {\n+\t\t\tplt_err(\"Failed to reserve a memzone, rep id %d, err %d\",\n+\t\t\t\trep_dev->vf_id, rte_errno);\n+\t\t\tgoto fail;\n+\t\t}\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\t/* Return if rep_xport dev already configured */\n+\tif (rep_xport_vdev_cfg->rep_xport_configured) {\n+\t\trep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;\n+\t\treturn 0;\n+\t}\n+\n+\t/* Configure rep_xport pmd */\n+\trc = configure_rep_xport_dev(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);\n+\tif (rc) {\n+\t\tplt_err(\"Configuring rep_xport port failed\");\n+\t\tgoto free;\n+\t}\n+\n+\t/* Setup a bitmap for rep_xport queues */\n+\trc = configure_rep_xport_queues_map(rep_xport_vdev_cfg);\n+\tif (rc != 0) {\n+\t\tplt_err(\"Failed to setup rep_xport queue map, err %d\", rc);\n+\t\tgoto free;\n+\t}\n+\n+\t/* Setup a queue for control channel */\n+\trc = configure_control_channel(rep_xport_vdev_cfg, rep_dev->rep_xport_vdev);\n+\tif (rc != 0) {\n+\t\tplt_err(\"Failed to setup control channgel, err %d\", rc);\n+\t\tgoto free;\n+\t}\n+\trep_dev->ctrl_chan_pool = rep_xport_vdev_cfg->ctrl_chan_pool;\n+\n \treturn 0;\n+free:\n+\tplt_memzone_free(mz);\n+fail:\n+\treturn rc;\n }\n \n int\n-cnxk_rep_dev_start(struct rte_eth_dev *ethdev)\n+cnxk_rep_promiscuous_enable(struct rte_eth_dev *ethdev)\n {\n \tPLT_SET_USED(ethdev);\n \treturn 0;\n }\n \n int\n-cnxk_rep_dev_close(struct rte_eth_dev *ethdev)\n+cnxk_rep_promiscuous_disable(struct rte_eth_dev *ethdev)\n {\n \tPLT_SET_USED(ethdev);\n \treturn 0;\n }\n \n+int\n+cnxk_rep_dev_start(struct rte_eth_dev *ethdev)\n+{\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\tint rc = 0;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\tethdev->rx_pkt_burst = cnxk_rep_rx_burst;\n+\tethdev->tx_pkt_burst = cnxk_rep_tx_burst;\n+\n+\t/* Start rep_xport device only once after first representor gets active */\n+\tif (!rep_xport_vdev_cfg->nb_rep_started) {\n+\t\trc = rte_eth_dev_start(rep_dev->rep_xport_vdev);\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Rep base vdev portid %d start failed, err %d\",\n+\t\t\t\trep_dev->rep_xport_vdev, rc);\n+\t\t\tgoto fail;\n+\t\t}\n+\n+\t\t/* Launch a thread to handle control messages */\n+\t\trc = cnxk_rep_control_thread_launch(cnxk_eth_pmd_priv(rep_dev->parent_dev));\n+\t\tif (rc) {\n+\t\t\tplt_err(\"Failed to 
launch message ctrl thread\");\n+\t\t\tgoto fail;\n+\t\t}\n+\t}\n+\n+\trep_xport_vdev_cfg->nb_rep_started++;\n+\n+\treturn 0;\n+fail:\n+\treturn rc;\n+}\n+\n+int\n+cnxk_rep_dev_close(struct rte_eth_dev *ethdev)\n+{\n+\treturn cnxk_rep_dev_uninit(ethdev);\n+}\n+\n int\n cnxk_rep_dev_stop(struct rte_eth_dev *ethdev)\n {\n-\tPLT_SET_USED(ethdev);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\tethdev->rx_pkt_burst = cnxk_rep_rx_burst_dummy;\n+\tethdev->tx_pkt_burst = cnxk_rep_tx_burst_dummy;\n+\trep_xport_vdev_cfg->nb_rep_started--;\n+\n+\t/* Stop rep_xport device only after all other devices stopped */\n+\tif (!rep_xport_vdev_cfg->nb_rep_started)\n+\t\trte_eth_dev_stop(rep_dev->rep_xport_vdev);\n+\n \treturn 0;\n+fail:\n+\treturn rte_errno;\n }\n \n int\n@@ -53,54 +441,220 @@ cnxk_rep_rx_queue_setup(struct rte_eth_dev *ethdev, uint16_t rx_queue_id, uint16\n \t\t\tunsigned int socket_id, const struct rte_eth_rxconf *rx_conf,\n \t\t\tstruct rte_mempool *mb_pool)\n {\n-\tPLT_SET_USED(ethdev);\n-\tPLT_SET_USED(rx_queue_id);\n-\tPLT_SET_USED(nb_rx_desc);\n-\tPLT_SET_USED(socket_id);\n-\tPLT_SET_USED(rx_conf);\n-\tPLT_SET_USED(mb_pool);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\tint rc = 0;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\t/* Allocate a qid, if tx queue setup already done use the same qid */\n+\tif (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)\n+\t\trep_dev->u.rxq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);\n+\telse\n+\t\trep_dev->u.rxq = rep_dev->u.txq;\n+\n+\t/* Setup the RX queue */\n+\trc = rte_eth_rx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.rxq, nb_rx_desc, socket_id,\n+\t\t\t\t    rx_conf, mb_pool);\n+\tif (rc < 0) {\n+\t\tplt_err(\"rte_eth_rx_queue_setup:err=%d, port=%u\\n\", rc, rep_dev->rep_xport_vdev);\n+\t\tgoto fail;\n+\t}\n+\n+\tethdev->data->rx_queues[rx_queue_id] = rep_dev;\n+\tplt_info(\"Representor id %d portid %d rxq %d\", rep_dev->vf_id, ethdev->data->port_id,\n+\t\t rep_dev->u.rxq);\n+\n \treturn 0;\n+fail:\n+\treturn rc;\n }\n \n void\n cnxk_rep_rx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)\n {\n-\tPLT_SET_USED(ethdev);\n-\tPLT_SET_USED(queue_id);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\tRTE_SET_USED(queue_id);\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\treturn;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\tplt_bitmap_clear(rep_xport_vdev_cfg->q_bmap_mem, rep_dev->u.rxq);\n }\n \n int\n cnxk_rep_tx_queue_setup(struct rte_eth_dev *ethdev, uint16_t tx_queue_id, uint16_t nb_tx_desc,\n \t\t\tunsigned int socket_id, const struct rte_eth_txconf *tx_conf)\n 
{\n-\tPLT_SET_USED(ethdev);\n-\tPLT_SET_USED(tx_queue_id);\n-\tPLT_SET_USED(nb_tx_desc);\n-\tPLT_SET_USED(socket_id);\n-\tPLT_SET_USED(tx_conf);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n+\tint rc = 0;\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\tgoto fail;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\t/* Allocate a qid, if rx queue setup already done use the same qid */\n+\tif (rep_dev->u.rxq == UINT16_MAX && rep_dev->u.txq == UINT16_MAX)\n+\t\trep_dev->u.txq = alloc_rep_xport_qid(rep_xport_vdev_cfg->q_map);\n+\telse\n+\t\trep_dev->u.txq = rep_dev->u.rxq;\n+\n+\t/* Setup the TX queue */\n+\trc = rte_eth_tx_queue_setup(rep_dev->rep_xport_vdev, rep_dev->u.txq, nb_tx_desc, socket_id,\n+\t\t\t\t    tx_conf);\n+\tif (rc < 0) {\n+\t\tplt_err(\"TX queue setup failed, err %d port %d\", rc, rep_dev->rep_xport_vdev);\n+\t\tgoto fail;\n+\t}\n+\n+\tethdev->data->tx_queues[tx_queue_id] = rep_dev;\n+\tplt_info(\"Representor id %d portid %d txq %d\", rep_dev->vf_id, ethdev->data->port_id,\n+\t\t rep_dev->u.txq);\n+\n \treturn 0;\n+fail:\n+\treturn rc;\n }\n \n void\n cnxk_rep_tx_queue_release(struct rte_eth_dev *ethdev, uint16_t queue_id)\n {\n-\tPLT_SET_USED(ethdev);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\trep_xport_vdev_cfg_t *rep_xport_vdev_cfg = NULL;\n+\tconst struct plt_memzone *mz;\n \tPLT_SET_USED(queue_id);\n+\n+\tmz = plt_memzone_lookup(CNXK_REP_XPORT_VDEV_CFG_MZ);\n+\tif (!mz) {\n+\t\tplt_err(\"Failed to lookup a memzone, rep id %d, err %d\",\n+\t\t\trep_dev->vf_id, rte_errno);\n+\t\treturn;\n+\t}\n+\n+\trep_xport_vdev_cfg = mz->addr;\n+\tplt_bitmap_clear(rep_xport_vdev_cfg->q_bmap_mem, rep_dev->u.txq);\n+}\n+\n+static int\n+process_eth_stats(struct cnxk_rep_dev *rep_dev, cnxk_rep_msg_ack_data_t *adata, cnxk_rep_msg_t msg)\n+{\n+\tcnxk_rep_msg_eth_stats_meta_t msg_st_meta;\n+\tuint32_t len = 0, rc;\n+\tvoid *buffer;\n+\tsize_t size;\n+\n+\tsize = CNXK_REP_MSG_MAX_BUFFER_SZ;\n+\tbuffer = plt_zmalloc(size, 0);\n+\tif (!buffer) {\n+\t\tplt_err(\"Failed to allocate mem\");\n+\t\trc = -ENOMEM;\n+\t\tgoto fail;\n+\t}\n+\n+\tcnxk_rep_msg_populate_header(buffer, &len);\n+\n+\tmsg_st_meta.portid = rep_dev->u.rxq;\n+\tcnxk_rep_msg_populate_command_meta(buffer, &len, &msg_st_meta,\n+\t\t\t\t\t   sizeof(cnxk_rep_msg_eth_stats_meta_t), msg);\n+\tcnxk_rep_msg_populate_msg_end(buffer, &len);\n+\n+\trc = cnxk_rep_msg_send_process(rep_dev, buffer, len, adata);\n+\tif (rc) {\n+\t\tplt_err(\"Failed to process the message, err %d\", rc);\n+\t\tgoto fail;\n+\t}\n+\n+\trte_free(buffer);\n+\n+\treturn 0;\n+fail:\n+\trte_free(buffer);\n+\treturn rc;\n }\n \n int\n cnxk_rep_stats_get(struct rte_eth_dev *ethdev, struct rte_eth_stats *stats)\n {\n-\tPLT_SET_USED(ethdev);\n-\tPLT_SET_USED(stats);\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\tstruct rte_eth_stats vf_stats;\n+\tcnxk_rep_msg_ack_data_t adata;\n+\tint rc;\n+\n+\t/* If representor not representing any active VF, return 0 */\n+\tif (!rep_dev->is_vf_active)\n+\t\treturn 0;\n+\n+\trc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_GET);\n+\tif (rc || adata.u.sval < 0) {\n+\t\tif (adata.u.sval < 0)\n+\t\t\trc = adata.u.sval;\n+\n+\t\tplt_err(\"Failed to clear stats for vf rep %x, err %d\", rep_dev->vf_id, rc);\n+\t}\n+\n+\tif (adata.size != sizeof(struct 
rte_eth_stats)) {\n+\t\trc = -EINVAL;\n+\t\tplt_err(\"Incomplete stats received for vf rep %d\", rep_dev->vf_id);\n+\t\tgoto fail;\n+\t}\n+\n+\trte_memcpy(&vf_stats, adata.u.data, adata.size);\n+\n+\tstats->q_ipackets[0] = vf_stats.ipackets;\n+\tstats->q_ibytes[0] = vf_stats.ibytes;\n+\tstats->ipackets = vf_stats.ipackets;\n+\tstats->ibytes = vf_stats.ibytes;\n+\n+\tstats->q_opackets[0] = vf_stats.opackets;\n+\tstats->q_obytes[0] = vf_stats.obytes;\n+\tstats->opackets = vf_stats.opackets;\n+\tstats->obytes = vf_stats.obytes;\n+\n \treturn 0;\n+fail:\n+\treturn rc;\n }\n \n int\n cnxk_rep_stats_reset(struct rte_eth_dev *ethdev)\n {\n-\tPLT_SET_USED(ethdev);\n-\treturn 0;\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(ethdev);\n+\tcnxk_rep_msg_ack_data_t adata;\n+\tint rc = 0;\n+\n+\t/* If representor not representing any active VF, return 0 */\n+\tif (!rep_dev->is_vf_active)\n+\t\treturn 0;\n+\n+\trc = process_eth_stats(rep_dev, &adata, CNXK_REP_MSG_ETH_STATS_CLEAR);\n+\tif (rc || adata.u.sval < 0) {\n+\t\tif (adata.u.sval < 0)\n+\t\t\trc = adata.u.sval;\n+\n+\t\tplt_err(\"Failed to clear stats for vf rep %x, err %d\", rep_dev->vf_id, rc);\n+\t}\n+\n+\treturn rc;\n }\n \n int\n@@ -110,3 +664,54 @@ cnxk_rep_flow_ops_get(struct rte_eth_dev *ethdev, const struct rte_flow_ops **op\n \tPLT_SET_USED(ops);\n \treturn 0;\n }\n+\n+int\n+cnxk_rep_mac_addr_set(struct rte_eth_dev *eth_dev, struct rte_ether_addr *addr)\n+{\n+\tstruct cnxk_rep_dev *rep_dev = cnxk_rep_pmd_priv(eth_dev);\n+\tcnxk_rep_msg_eth_set_mac_meta_t msg_sm_meta;\n+\tcnxk_rep_msg_ack_data_t adata;\n+\tuint32_t len = 0, rc;\n+\tvoid *buffer;\n+\tsize_t size;\n+\n+\t/* If representor not representing any VF, return 0 */\n+\tif (!rep_dev->is_vf_active)\n+\t\treturn 0;\n+\n+\tsize = CNXK_REP_MSG_MAX_BUFFER_SZ;\n+\tbuffer = plt_zmalloc(size, 0);\n+\tif (!buffer) {\n+\t\tplt_err(\"Failed to allocate mem\");\n+\t\trc = -ENOMEM;\n+\t\tgoto fail;\n+\t}\n+\n+\tcnxk_rep_msg_populate_header(buffer, &len);\n+\n+\tmsg_sm_meta.portid = rep_dev->u.rxq;\n+\trte_memcpy(&msg_sm_meta.addr_bytes, addr->addr_bytes, RTE_ETHER_ADDR_LEN);\n+\tcnxk_rep_msg_populate_command_meta(buffer, &len, &msg_sm_meta,\n+\t\t\t\t\t   sizeof(cnxk_rep_msg_eth_set_mac_meta_t),\n+\t\t\t\t\t   CNXK_REP_MSG_ETH_SET_MAC);\n+\tcnxk_rep_msg_populate_msg_end(buffer, &len);\n+\n+\trc = cnxk_rep_msg_send_process(rep_dev, buffer, len, &adata);\n+\tif (rc) {\n+\t\tplt_err(\"Failed to process the message, err %d\", rc);\n+\t\tgoto fail;\n+\t}\n+\n+\tif (adata.u.sval < 0) {\n+\t\trc = adata.u.sval;\n+\t\tplt_err(\"Failed to set mac address, err %d\", rc);\n+\t\tgoto fail;\n+\t}\n+\n+\trte_free(buffer);\n+\n+\treturn 0;\n+fail:\n+\trte_free(buffer);\n+\treturn rc;\n+}\n",
    "prefixes": [
        "6/9"
    ]
}
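
A consumer of this payload typically follows the embedded URLs rather than rebuilding them. A short illustrative Python sketch along those lines (the output file name is arbitrary, and the loop assumes the usual Patchwork checks schema with "context" and "state" fields):

import requests

patch = requests.get("http://patches.dpdk.org/api/patches/130177/",
                     params={"format": "json"}).json()

# Save the patch in mbox form; it can then be applied with `git am 130177.mbox`.
with open("130177.mbox", "wb") as f:
    f.write(requests.get(patch["mbox"]).content)

# "check" above is the aggregate CI state ("success" here); the individual
# results sit behind the "checks" URL.
for check in requests.get(patch["checks"]).json():
    print(check["context"], check["state"])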