get:
Show a patch.

patch:
Partially update a patch (only the supplied fields are changed).

put:
Update a patch.
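
A minimal sketch of exercising these operations with Python's requests library. The token placeholder and the choice of "state" as the updated field are assumptions for illustration; which fields your account may change depends on your Patchwork permissions.

import requests

URL = "https://patches.dpdk.org/api/patches/123884/"

# GET: show the patch; read access needs no authentication.
patch = requests.get(URL, params={"format": "json"}).json()
print(patch["name"], patch["state"])

# PATCH: partial update of selected fields (assumed to require a
# maintainer account and its API token, sent as a DRF token header).
headers = {"Authorization": "Token <your-api-token>"}
resp = requests.patch(URL, headers=headers, json={"state": "accepted"})
resp.raise_for_status()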

GET /api/patches/123884/?format=api
HTTP 200 OK
Allow: GET, PUT, PATCH, HEAD, OPTIONS
Content-Type: application/json
Vary: Accept

{
    "id": 123884,
    "url": "https://patches.dpdk.org/api/patches/123884/?format=api",
    "web_url": "https://patches.dpdk.org/project/dpdk/patch/20230214113852.3341607-4-mingxia.liu@intel.com/",
    "project": {
        "id": 1,
        "url": "https://patches.dpdk.org/api/projects/1/?format=api",
        "name": "DPDK",
        "link_name": "dpdk",
        "list_id": "dev.dpdk.org",
        "list_email": "dev@dpdk.org",
        "web_url": "http://core.dpdk.org",
        "scm_url": "git://dpdk.org/dpdk",
        "webscm_url": "http://git.dpdk.org/dpdk",
        "list_archive_url": "https://inbox.dpdk.org/dev",
        "list_archive_url_format": "https://inbox.dpdk.org/dev/{}",
        "commit_url_format": ""
    },
    "msgid": "<20230214113852.3341607-4-mingxia.liu@intel.com>",
    "list_archive_url": "https://inbox.dpdk.org/dev/20230214113852.3341607-4-mingxia.liu@intel.com",
    "date": "2023-02-14T11:38:50",
    "name": "[v2,3/5] net/cpfl: add hairpin queue enable and setup",
    "commit_ref": null,
    "pull_url": null,
    "state": "superseded",
    "archived": true,
    "hash": "22fdada8bde8f7d1bda8a8e7f907864c86c95573",
    "submitter": {
        "id": 2514,
        "url": "https://patches.dpdk.org/api/people/2514/?format=api",
        "name": "Liu, Mingxia",
        "email": "mingxia.liu@intel.com"
    },
    "delegate": {
        "id": 1540,
        "url": "https://patches.dpdk.org/api/users/1540/?format=api",
        "username": "qzhan15",
        "first_name": "Qi",
        "last_name": "Zhang",
        "email": "qi.z.zhang@intel.com"
    },
    "mbox": "https://patches.dpdk.org/project/dpdk/patch/20230214113852.3341607-4-mingxia.liu@intel.com/mbox/",
    "series": [
        {
            "id": 27002,
            "url": "https://patches.dpdk.org/api/series/27002/?format=api",
            "web_url": "https://patches.dpdk.org/project/dpdk/list/?series=27002",
            "date": "2023-02-14T11:38:47",
            "name": "add port to port feature",
            "version": 2,
            "mbox": "https://patches.dpdk.org/series/27002/mbox/"
        }
    ],
    "comments": "https://patches.dpdk.org/api/patches/123884/comments/",
    "check": "warning",
    "checks": "https://patches.dpdk.org/api/patches/123884/checks/",
    "tags": {},
    "related": [],
    "headers": {
        "Return-Path": "<dev-bounces@dpdk.org>",
        "X-Original-To": "patchwork@inbox.dpdk.org",
        "Delivered-To": "patchwork@inbox.dpdk.org",
        "Received": [
            "from mails.dpdk.org (mails.dpdk.org [217.70.189.124])\n\tby inbox.dpdk.org (Postfix) with ESMTP id 49FD641C49;\n\tTue, 14 Feb 2023 13:36:51 +0100 (CET)",
            "from mails.dpdk.org (localhost [127.0.0.1])\n\tby mails.dpdk.org (Postfix) with ESMTP id 187F542D9B;\n\tTue, 14 Feb 2023 13:36:35 +0100 (CET)",
            "from mga18.intel.com (mga18.intel.com [134.134.136.126])\n by mails.dpdk.org (Postfix) with ESMTP id 5025342D8A\n for <dev@dpdk.org>; Tue, 14 Feb 2023 13:36:33 +0100 (CET)",
            "from orsmga008.jf.intel.com ([10.7.209.65])\n by orsmga106.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384;\n 14 Feb 2023 04:36:32 -0800",
            "from dpdk-mingxial-01.sh.intel.com ([10.67.119.167])\n by orsmga008.jf.intel.com with ESMTP; 14 Feb 2023 04:36:30 -0800"
        ],
        "DKIM-Signature": "v=1; a=rsa-sha256; c=relaxed/simple;\n d=intel.com; i=@intel.com; q=dns/txt; s=Intel;\n t=1676378193; x=1707914193;\n h=from:to:cc:subject:date:message-id:in-reply-to:\n references:mime-version:content-transfer-encoding;\n bh=hDI0fYJETKxuxHL87DLZUMfunVr4MrpqXn3JerfYyiQ=;\n b=ce8p4B0j7XeTXK6jh+L3ERT3jPXVBfDmEVgiq02gfrCZ4WNoel0SZeqQ\n kwYs6cFPbYkVamt8EqUxrzve1wR5Hazxboy0pQBFt7cJ0TH7yTyrGnMdt\n rScDrU6XazU1uxhXe3kL9zCoc9L9Qu4KJ5CzVoBmLGrAKl2zdX6Swx5cw\n 6h2xKqP6jlE1hDqreGS8A/GNcxdsxm2qxwakif2PW85x/kneitPHlz8Nk\n IF/Z21LHKNXw3WnbDMWpFFgRfqADnfjj2gj+JJ2ibudPv+FdLiXc+nmMx\n i0AhDq1vPhPY/aVFT3+fTfDNEb5mxF5gdSQZVpwN/+IvMamvjxocv64nm w==;",
        "X-IronPort-AV": [
            "E=McAfee;i=\"6500,9779,10620\"; a=\"314793190\"",
            "E=Sophos;i=\"5.97,296,1669104000\"; d=\"scan'208\";a=\"314793190\"",
            "E=McAfee;i=\"6500,9779,10620\"; a=\"699528522\"",
            "E=Sophos;i=\"5.97,296,1669104000\"; d=\"scan'208\";a=\"699528522\""
        ],
        "X-ExtLoop1": "1",
        "From": "Mingxia Liu <mingxia.liu@intel.com>",
        "To": "dev@dpdk.org,\n\tbeilei.xing@intel.com,\n\tyuying.zhang@intel.com",
        "Cc": "Mingxia Liu <mingxia.liu@intel.com>",
        "Subject": "[PATCH v2 3/5] net/cpfl: add hairpin queue enable and setup",
        "Date": "Tue, 14 Feb 2023 11:38:50 +0000",
        "Message-Id": "<20230214113852.3341607-4-mingxia.liu@intel.com>",
        "X-Mailer": "git-send-email 2.25.1",
        "In-Reply-To": "<20230214113852.3341607-1-mingxia.liu@intel.com>",
        "References": "<20230118130659.976873-1-mingxia.liu@intel.com>\n <20230214113852.3341607-1-mingxia.liu@intel.com>",
        "MIME-Version": "1.0",
        "Content-Transfer-Encoding": "8bit",
        "X-BeenThere": "dev@dpdk.org",
        "X-Mailman-Version": "2.1.29",
        "Precedence": "list",
        "List-Id": "DPDK patches and discussions <dev.dpdk.org>",
        "List-Unsubscribe": "<https://mails.dpdk.org/options/dev>,\n <mailto:dev-request@dpdk.org?subject=unsubscribe>",
        "List-Archive": "<http://mails.dpdk.org/archives/dev/>",
        "List-Post": "<mailto:dev@dpdk.org>",
        "List-Help": "<mailto:dev-request@dpdk.org?subject=help>",
        "List-Subscribe": "<https://mails.dpdk.org/listinfo/dev>,\n <mailto:dev-request@dpdk.org?subject=subscribe>",
        "Errors-To": "dev-bounces@dpdk.org"
    },
    "content": "This patch add hairpin queue ops\n - hairpin_cap_get\n - rx_hairpin_queue_setup\n - tx_hairpin_queue_setup\n\nSigned-off-by: Mingxia Liu <mingxia.liu@intel.com>\n---\n drivers/net/cpfl/cpfl_ethdev.c |  15 ++\n drivers/net/cpfl/cpfl_rxtx.c   | 443 ++++++++++++++++++++++++++++++++-\n drivers/net/cpfl/cpfl_rxtx.h   |  22 +-\n 3 files changed, 468 insertions(+), 12 deletions(-)",
    "diff": "diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c\nindex acc6180ca4..ebee21a82a 100644\n--- a/drivers/net/cpfl/cpfl_ethdev.c\n+++ b/drivers/net/cpfl/cpfl_ethdev.c\n@@ -159,6 +159,18 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,\n \treturn rte_eth_linkstatus_set(dev, &new_link);\n }\n \n+static int\n+cpfl_hairpin_cap_get(__rte_unused struct rte_eth_dev *dev,\n+\t\t     struct rte_eth_hairpin_cap *cap)\n+{\n+\tcap->max_nb_queues = CPFL_MAX_NB_QUEUES;\n+\tcap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;\n+\tcap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;\n+\tcap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;\n+\n+\treturn 0;\n+}\n+\n static int\n cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)\n {\n@@ -1295,6 +1307,9 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {\n \t.xstats_get\t\t\t= cpfl_dev_xstats_get,\n \t.xstats_get_names\t\t= cpfl_dev_xstats_get_names,\n \t.xstats_reset\t\t\t= cpfl_dev_xstats_reset,\n+\t.hairpin_cap_get\t\t= cpfl_hairpin_cap_get,\n+\t.rx_hairpin_queue_setup\t\t= cpfl_rx_hairpin_queue_setup,\n+\t.tx_hairpin_queue_setup\t\t= cpfl_tx_hairpin_queue_setup,\n };\n \n static uint16_t\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c\nindex c7e5487366..e59cabe893 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.c\n+++ b/drivers/net/cpfl/cpfl_rxtx.c\n@@ -10,11 +10,77 @@\n #include \"cpfl_rxtx.h\"\n #include \"cpfl_rxtx_vec_common.h\"\n \n+#define CPFL_NB_MBUF\t\t4096\n+#define CPFL_CACHE_SIZE\t\t250\n+#define CPFL_MBUF_SIZE\t\t2048\n+#define CPFL_P2P_RING_BUF\t128\n+\n static void\n cpfl_tx_queue_release(void *txq);\n static void\n cpfl_rx_queue_release(void *txq);\n \n+static inline void\n+reset_tx_hairpin_descq(struct idpf_tx_queue *txq)\n+{\n+\tuint32_t i, size;\n+\n+\tif (!txq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to txq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)txq->desc_ring)[i] = 0;\n+}\n+\n+static inline void\n+reset_tx_hairpin_complq(struct idpf_tx_queue *cq)\n+{\n+\tuint32_t i, size;\n+\n+\tif (!cq) {\n+\t\tPMD_DRV_LOG(DEBUG, \"Pointer to complq is NULL\");\n+\t\treturn;\n+\t}\n+\n+\tsize = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;\n+\tfor (i = 0; i < size; i++)\n+\t\t((volatile char *)cq->compl_ring)[i] = 0;\n+}\n+\n+static inline void\n+reset_rx_hairpin_descq(struct idpf_rx_queue *rxq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxq)\n+\t\treturn;\n+\n+\tlen = rxq->nb_rx_desc;\n+\tfor (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)\n+\t\t((volatile char *)rxq->rx_ring)[i] = 0;\n+}\n+\n+static inline void\n+reset_rx_hairpin_bufq(struct idpf_rx_queue *rxbq)\n+{\n+\tuint16_t len;\n+\tuint32_t i;\n+\n+\tif (!rxbq)\n+\t\treturn;\n+\n+\tlen = rxbq->nb_rx_desc;\n+\tfor (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)\n+\t\t((volatile char *)rxbq->rx_ring)[i] = 0;\n+\n+\trxbq->bufq1 = NULL;\n+\trxbq->bufq2 = NULL;\n+}\n+\n static uint64_t\n cpfl_rx_offload_convert(uint64_t offload)\n {\n@@ -763,16 +829,25 @@ cpfl_rx_queue_release(void *rxq)\n \tq = &(cpfl_rxq->base);\n \n \t/* Split queue */\n-\tif (q->bufq1 != NULL && q->bufq2 != NULL) {\n+\tif (q->bufq1 != NULL) {\n+\t\t/* the mz is shared between Tx/Rx hairpin, let Tx_release\n+\t\t * free the buf.\n+\t\t */\n+\t\tif (!cpfl_rxq->hairpin_info.hairpin_q) {\n+\t\t\trte_memzone_free(q->bufq1->mz);\n+\t\t\tif (q->bufq2 != NULL)\n+\t\t\t\trte_memzone_free(q->bufq2->mz);\n+\t\t\trte_memzone_free(q->mz);\n+\t\t}\n \t\tq->bufq1->ops->release_mbufs(q->bufq1);\n 
\t\trte_free(q->bufq1->sw_ring);\n-\t\trte_memzone_free(q->bufq1->mz);\n \t\trte_free(q->bufq1);\n-\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n-\t\trte_free(q->bufq2->sw_ring);\n-\t\trte_memzone_free(q->bufq2->mz);\n-\t\trte_free(q->bufq2);\n-\t\trte_memzone_free(q->mz);\n+\n+\t\tif (q->bufq2 != NULL) {\n+\t\t\tq->bufq2->ops->release_mbufs(q->bufq2);\n+\t\t\trte_free(q->bufq2->sw_ring);\n+\t\t\trte_free(q->bufq2);\n+\t\t}\n \t\trte_free(cpfl_rxq);\n \t\treturn;\n \t}\n@@ -1042,3 +1117,357 @@ cpfl_set_tx_function(struct rte_eth_dev *dev)\n \t}\n #endif /* RTE_ARCH_X86 */\n }\n+\n+static int\n+cpfl_rx_hairpin_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,\n+\t\t\t   uint16_t queue_idx, uint16_t nb_desc,\n+\t\t\t   struct idpf_tx_queue *peer_txq)\n+{\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct idpf_adapter *adapter = vport->adapter;\n+\tstruct idpf_hw *hw = &adapter->hw;\n+\tconst struct rte_memzone *mz;\n+\tstruct rte_mempool *mp;\n+\tuint32_t ring_size;\n+\tchar pool_name[RTE_MEMPOOL_NAMESIZE];\n+\n+\tmp = cpfl_vport->p2p_mp;\n+\tif (!mp) {\n+\t\tsnprintf(pool_name, RTE_MEMPOOL_NAMESIZE, \"p2p_mb_pool_%u\",\n+\t\t\t dev->data->port_id);\n+\t\tmp = rte_pktmbuf_pool_create(pool_name, CPFL_NB_MBUF, CPFL_CACHE_SIZE,\n+\t\t\t\t\t     0, CPFL_MBUF_SIZE, dev->device->numa_node);\n+\t\tif (!mp) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to allocate mbuf pool for p2p\");\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tcpfl_vport->p2p_mp = mp;\n+\t}\n+\n+\tbufq->mp = mp;\n+\tbufq->nb_rx_desc = nb_desc;\n+\tbufq->queue_id = vport->chunks_info.rx_buf_start_qid + queue_idx;\n+\tbufq->port_id = dev->data->port_id;\n+\tbufq->adapter = adapter;\n+\tbufq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;\n+\n+\tbufq->sw_ring = rte_zmalloc(\"sw ring\",\n+\t\t\t\t    sizeof(struct rte_mbuf *) * nb_desc,\n+\t\t\t\t    RTE_CACHE_LINE_SIZE);\n+\tif (!bufq->sw_ring) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for SW ring\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\t/* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */\n+\tif (peer_txq && peer_txq->complq->mz) {\n+\t\tmz = peer_txq->complq->mz;\n+\t\tbufq->rx_ring_phys_addr = mz->iova;\n+\t\tbufq->rx_ring = mz->addr;\n+\t\tbufq->mz = mz;\n+\t} else {\n+\t\tring_size = RTE_ALIGN(bufq->nb_rx_desc * CPFL_P2P_DESC_LEN,\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\tmz = rte_eth_dma_zone_reserve(dev, \"hairpin_rx_buf_ring\", queue_idx,\n+\t\t\t\t\t      ring_size + CPFL_P2P_RING_BUF,\n+\t\t\t\t\t      CPFL_RING_BASE_ALIGN,\n+\t\t\t\t\t      dev->device->numa_node);\n+\t\tif (!mz) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory\"\n+\t\t\t\t\t  \"for hairpin RX buffer queue.\");\n+\t\t\trte_free(bufq->sw_ring);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\tbufq->rx_ring_phys_addr = mz->iova;\n+\t\tbufq->rx_ring = mz->addr;\n+\t\tbufq->mz = mz;\n+\t}\n+\treset_rx_hairpin_bufq(bufq);\n+\tbufq->q_set = true;\n+\tbufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +\n+\t\t\t queue_idx * vport->chunks_info.rx_buf_qtail_spacing);\n+\tbufq->ops = &def_rxq_ops;\n+\n+\treturn 0;\n+}\n+\n+int\n+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc,\n+\t\t\t    const struct rte_eth_hairpin_conf *conf)\n+{\n+\tstruct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data->dev_private;\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct idpf_adapter *adapter_base = 
vport->adapter;\n+\tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base);\n+\tstruct cpfl_rxq_hairpin_info *hairpin_info;\n+\tstruct cpfl_vport *peer_cpfl_vport;\n+\tstruct rte_eth_dev_data *dev_data;\n+\tstruct cpfl_rx_queue *cpfl_rxq;\n+\tstruct cpfl_tx_queue *peer_txq = NULL;\n+\tstruct idpf_vport *peer_vport;\n+\tstruct idpf_rx_queue *bufq1 = NULL;\n+\tstruct idpf_rx_queue *rxq;\n+\tuint16_t peer_port = conf->peers[0].port;\n+\tuint16_t peer_q = conf->peers[0].queue;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t qid;\n+\tint ret;\n+\n+\tif (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\tPMD_INIT_LOG(ERR, \"Only spilt queue model supports hairpin queue.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > CPFL_MAX_RING_DESC ||\n+\t    nb_desc < CPFL_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of receive descriptors is invalid\", nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory if needed */\n+\tif (dev->data->rx_queues[queue_idx]) {\n+\t\tcpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);\n+\t\tdev->data->rx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Setup Rx description queue */\n+\tcpfl_rxq = rte_zmalloc_socket(\"cpfl hairpin rxq\",\n+\t\t\t\t sizeof(struct cpfl_rx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!cpfl_rxq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for rx queue data structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\trxq = &(cpfl_rxq->base);\n+\thairpin_info = &(cpfl_rxq->hairpin_info);\n+\trxq->nb_rx_desc = nb_desc * 2;\n+\trxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;\n+\trxq->port_id = dev->data->port_id;\n+\trxq->adapter = adapter_base;\n+\thairpin_info->hairpin_q = true;\n+\trxq->rx_buf_len = CPFL_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;\n+\n+\tif (peer_port != dev->data->port_id)\n+\t\thairpin_info->hairpin_cv = true;\n+\thairpin_info->peer_txp = peer_port;\n+\tpeer_cpfl_vport = adapter->vports[peer_port];\n+\tpeer_vport = &(peer_cpfl_vport->base);\n+\tdev_data = peer_vport->dev_data;\n+\tif (peer_q < dev_data->nb_tx_queues)\n+\t\tpeer_txq = dev_data->tx_queues[peer_q];\n+\n+\t/* Hairpin Rxq and Txq share the same HW ring */\n+\tif (peer_txq && peer_txq->base.mz) {\n+\t\tmz = peer_txq->base.mz;\n+\t\trxq->rx_ring_phys_addr = mz->iova;\n+\t\trxq->rx_ring = mz->addr;\n+\t\trxq->mz = mz;\n+\t} else {\n+\t\tring_size = RTE_ALIGN(rxq->nb_rx_desc * CPFL_P2P_DESC_LEN,\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\tmz = rte_eth_dma_zone_reserve(dev, \"hairpin_rx_ring\", queue_idx,\n+\t\t\t\t\t      ring_size + CPFL_P2P_RING_BUF,\n+\t\t\t\t\t      CPFL_RING_BASE_ALIGN,\n+\t\t\t\t\t      dev->device->numa_node);\n+\t\tif (!mz) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for RX\");\n+\t\t\tret = -ENOMEM;\n+\t\t\tgoto free_rxq;\n+\t\t}\n+\n+\t\trxq->rx_ring_phys_addr = mz->iova;\n+\t\trxq->rx_ring = mz->addr;\n+\t\trxq->mz = mz;\n+\t}\n+\treset_rx_hairpin_descq(rxq);\n+\n+\t/* setup 1 Rx buffer queue for 1 hairpin rxq */\n+\tbufq1 = rte_zmalloc_socket(\"hairpin rx bufq1\",\n+\t\t\t\t   sizeof(struct idpf_rx_queue),\n+\t\t\t\t   RTE_CACHE_LINE_SIZE,\n+\t\t\t\t   SOCKET_ID_ANY);\n+\tif (!bufq1) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for hairpin Rx buffer queue 1.\");\n+\t\tret = -ENOMEM;\n+\t\tgoto free_mz;\n+\t}\n+\tqid = 2 * queue_idx;\n+\tret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc, &(peer_txq->base));\n+\tif (ret) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to setup hairpin Rx 
buffer queue 1\");\n+\t\tret = -EINVAL;\n+\t\tgoto free_bufq1;\n+\t}\n+\trxq->bufq1 = bufq1;\n+\trxq->bufq2 = NULL;\n+\trxq->q_set = true;\n+\tdev->data->rx_queues[queue_idx] = cpfl_rxq;\n+\n+\treturn 0;\n+free_bufq1:\n+\trte_free(bufq1);\n+free_mz:\n+\trte_memzone_free(mz);\n+free_rxq:\n+\trte_free(rxq);\n+\n+\treturn ret;\n+}\n+\n+int\n+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc,\n+\t\t\t    const struct rte_eth_hairpin_conf *conf)\n+{\n+\tstruct cpfl_vport *cpfl_vport =\n+\t    (struct cpfl_vport *)dev->data->dev_private;\n+\n+\tstruct idpf_vport *vport = &(cpfl_vport->base);\n+\tstruct idpf_adapter *adapter_base = vport->adapter;\n+\tstruct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(adapter_base);\n+\tstruct cpfl_txq_hairpin_info *hairpin_info;\n+\tstruct cpfl_vport *peer_cpfl_vport;\n+\tstruct rte_eth_dev_data *dev_data;\n+\tstruct idpf_vport *peer_vport;\n+\tstruct idpf_hw *hw = &adapter_base->hw;\n+\tstruct cpfl_tx_queue *cpfl_txq;\n+\tstruct idpf_tx_queue *txq, *cq;\n+\tstruct idpf_rx_queue *peer_rxq = NULL;\n+\tconst struct rte_memzone *mz;\n+\tuint32_t ring_size;\n+\tuint16_t peer_port = conf->peers[0].port;\n+\tuint16_t peer_q = conf->peers[0].queue;\n+\n+\tif (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {\n+\t\tPMD_INIT_LOG(ERR, \"Only spilt queue model supports hairpin queue.\");\n+\t\treturn -EINVAL;\n+\t}\n+\n+\tif (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||\n+\t    nb_desc > CPFL_MAX_RING_DESC ||\n+\t    nb_desc < CPFL_MIN_RING_DESC) {\n+\t\tPMD_INIT_LOG(ERR, \"Number (%u) of transmit descriptors is invalid\",\n+\t\t\t     nb_desc);\n+\t\treturn -EINVAL;\n+\t}\n+\n+\t/* Free memory if needed. */\n+\tif (dev->data->tx_queues[queue_idx]) {\n+\t\tcpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);\n+\t\tdev->data->tx_queues[queue_idx] = NULL;\n+\t}\n+\n+\t/* Allocate the TX queue data structure. */\n+\tcpfl_txq = rte_zmalloc_socket(\"cpfl hairpin txq\",\n+\t\t\t\t sizeof(struct cpfl_tx_queue),\n+\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t SOCKET_ID_ANY);\n+\tif (!cpfl_txq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\ttxq = &(cpfl_txq->base);\n+\thairpin_info = &(cpfl_txq->hairpin_info);\n+\t/* Txq ring length should be 2 times of Tx completion queue size. 
*/\n+\ttxq->nb_tx_desc = nb_desc * 2;\n+\ttxq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;\n+\ttxq->port_id = dev->data->port_id;\n+\thairpin_info->hairpin_q = true;\n+\n+\tif (peer_port != dev->data->port_id)\n+\t\tcpfl_txq->hairpin_info.hairpin_cv = true;\n+\thairpin_info->peer_rxp = peer_port;\n+\tpeer_cpfl_vport = adapter->vports[peer_port];\n+\tpeer_vport = &(peer_cpfl_vport->base);\n+\thairpin_info->peer_rxq_id = peer_vport->chunks_info.rx_start_qid + conf->peers[0].queue;\n+\tdev_data = peer_vport->dev_data;\n+\tif (peer_q < dev_data->nb_rx_queues)\n+\t\tpeer_rxq = dev_data->rx_queues[peer_q];\n+\n+\t/* Hairpin Rxq and Txq share the same HW ring */\n+\tif (peer_rxq && peer_rxq->mz) {\n+\t\tmz = peer_rxq->mz;\n+\t\ttxq->tx_ring_phys_addr = mz->iova;\n+\t\ttxq->desc_ring = mz->addr;\n+\t\ttxq->mz = mz;\n+\t} else {\n+\t\tring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\tmz = rte_eth_dma_zone_reserve(dev, \"hairpin_tx_ring\", queue_idx,\n+\t\t\t\t\t      ring_size + CPFL_P2P_RING_BUF,\n+\t\t\t\t\t      CPFL_RING_BASE_ALIGN,\n+\t\t\t\t\t      dev->device->numa_node);\n+\t\tif (!mz) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX\");\n+\t\t\trte_free(txq->sw_ring);\n+\t\t\trte_free(txq);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\n+\t\ttxq->tx_ring_phys_addr = mz->iova;\n+\t\ttxq->desc_ring = mz->addr;\n+\t\ttxq->mz = mz;\n+\t}\n+\n+\treset_tx_hairpin_descq(txq);\n+\ttxq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +\n+\t\t\tqueue_idx * vport->chunks_info.tx_qtail_spacing);\n+\ttxq->ops = &def_txq_ops;\n+\n+\t/* Allocate the TX completion queue data structure. */\n+\ttxq->complq = rte_zmalloc_socket(\"cpfl hairpin cq\",\n+\t\t\t\t\t sizeof(struct idpf_tx_queue),\n+\t\t\t\t\t RTE_CACHE_LINE_SIZE,\n+\t\t\t\t\t dev->device->numa_node);\n+\tcq = txq->complq;\n+\tif (!cq) {\n+\t\tPMD_INIT_LOG(ERR, \"Failed to allocate memory for tx queue structure\");\n+\t\treturn -ENOMEM;\n+\t}\n+\n+\tcq->nb_tx_desc = nb_desc;\n+\tcq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;\n+\tcq->port_id = dev->data->port_id;\n+\thairpin_info->complq_peer_rxq_id =\n+\t    peer_vport->chunks_info.rx_buf_start_qid + conf->peers[0].queue * 2;\n+\n+\t/* Hairpin Rx buffer queue and Tx completion queue share the same HW ring */\n+\tif (peer_rxq && peer_rxq->bufq1->mz) {\n+\t\tmz = peer_rxq->bufq1->mz;\n+\t\tcq->tx_ring_phys_addr = mz->iova;\n+\t\tcq->compl_ring = mz->addr;\n+\t\tcq->mz = mz;\n+\t} else {\n+\t\tring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,\n+\t\t\t\t      CPFL_DMA_MEM_ALIGN);\n+\t\tmz = rte_eth_dma_zone_reserve(dev, \"hairpin_tx_compl_ring\", queue_idx,\n+\t\t\t\t\t      ring_size + CPFL_P2P_RING_BUF,\n+\t\t\t\t\t      CPFL_RING_BASE_ALIGN,\n+\t\t\t\t\t      dev->device->numa_node);\n+\t\tif (!mz) {\n+\t\t\tPMD_INIT_LOG(ERR, \"Failed to reserve DMA memory for TX completion queue\");\n+\t\t\trte_free(txq->sw_ring);\n+\t\t\trte_free(txq);\n+\t\t\treturn -ENOMEM;\n+\t\t}\n+\t\tcq->tx_ring_phys_addr = mz->iova;\n+\t\tcq->compl_ring = mz->addr;\n+\t\tcq->mz = mz;\n+\t}\n+\n+\treset_tx_hairpin_complq(cq);\n+\n+\ttxq->q_set = true;\n+\tdev->data->tx_queues[queue_idx] = cpfl_txq;\n+\n+\treturn 0;\n+}\ndiff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h\nindex e241afece9..d4790d60ae 100644\n--- a/drivers/net/cpfl/cpfl_rxtx.h\n+++ b/drivers/net/cpfl/cpfl_rxtx.h\n@@ -9,12 +9,17 @@\n #include \"cpfl_ethdev.h\"\n \n /* In QLEN must be whole number of 32 descriptors. 
*/\n-#define CPFL_ALIGN_RING_DESC\t32\n-#define CPFL_MIN_RING_DESC\t32\n-#define CPFL_MAX_RING_DESC\t4096\n-#define CPFL_DMA_MEM_ALIGN\t4096\n+#define CPFL_ALIGN_RING_DESC\t\t32\n+#define CPFL_MIN_RING_DESC\t\t32\n+#define CPFL_MAX_RING_DESC\t\t4096\n+#define CPFL_DMA_MEM_ALIGN\t\t4096\n+#define CPFL_P2P_DESC_LEN\t\t16\n+#define CPFL_MAX_HAIRPINQ_RX_2_TX\t1\n+#define CPFL_MAX_HAIRPINQ_TX_2_RX\t1\n+#define CPFL_MAX_HAIRPINQ_NB_DESC\t1024\n+#define CPFL_MAX_NB_QUEUES\t\t16\n /* Base address of the HW descriptor ring should be 128B aligned. */\n-#define CPFL_RING_BASE_ALIGN\t128\n+#define CPFL_RING_BASE_ALIGN\t\t128\n \n #define CPFL_DEFAULT_RX_FREE_THRESH\t32\n \n@@ -69,4 +74,11 @@ void cpfl_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void cpfl_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);\n void cpfl_set_rx_function(struct rte_eth_dev *dev);\n void cpfl_set_tx_function(struct rte_eth_dev *dev);\n+int\n+cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc, const struct rte_eth_hairpin_conf *conf);\n+int\n+cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,\n+\t\t\t    uint16_t nb_desc,\n+\t\t\t    const struct rte_eth_hairpin_conf *conf);\n #endif /* _CPFL_RXTX_H_ */\n",
    "prefixes": [
        "v2",
        "3/5"
    ]
}
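
A minimal sketch of consuming a few fields of the response above with the requests library; the output file name is illustrative only.

import requests

patch = requests.get(
    "https://patches.dpdk.org/api/patches/123884/", params={"format": "json"}
).json()

# "state" and "check" summarise the review and CI status shown above
# ("superseded" and "warning" for this patch).
print(patch["state"], patch["check"])

# "mbox" is the raw patch in mbox form, suitable for `git am`.
with open("net-cpfl-hairpin.patch", "w") as f:
    f.write(requests.get(patch["mbox"]).text)

# "series" links this patch to its cover letter and sibling patches.
for series in patch["series"]:
    print(series["version"], series["name"], series["mbox"])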